index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
45,725 | t0theheart/companies-matcher | refs/heads/master | /companies_matcher/app/serializers.py | from pydantic import BaseModel, validator
from typing import List
class MatchingMultiplicatorsParams(BaseModel):
    """Request payload for POST /multiplicators/match.

    Attributes:
        tickers: non-empty list of stock ticker symbols to compare.
        multiplicators: non-empty list of finviz multiplicator names to scrape.
    """

    tickers: List[str]
    multiplicators: List[str]

    @validator('tickers')
    def _validate_tickers(cls, v):
        # Matching zero tickers is always a client error; reject early.
        if not v:
            # Fix: original message misspelled "field" as "filed".
            raise ValueError('tickers field must not be empty')
        return v

    @validator('multiplicators')
    def _validate_multiplicators(cls, v):
        if not v:
            raise ValueError('multiplicators field must not be empty')
        return v
class MatchingReportsParams(BaseModel):
    """Request payload for POST /reports/match.

    Attributes:
        tickers: non-empty list of stock ticker symbols.
        topics: non-empty list of report row topics to extract.
        reports: non-empty list of marketwatch report names (income/balance/cash).
    """

    tickers: List[str]
    topics: List[str]
    reports: List[str]

    @validator('tickers')
    def _validate_tickers(cls, v):
        if not v:
            # Fix: original message misspelled "field" as "filed".
            raise ValueError('tickers field must not be empty')
        return v

    @validator('topics')
    def _validate_topics(cls, v):
        if not v:
            raise ValueError('topics field must not be empty')
        return v

    @validator('reports')
    def _validate_reports(cls, v):
        if not v:
            raise ValueError('reports field must not be empty')
        return v
class DividendModel(BaseModel):
    """One dividend position: a ticker plus the number of shares held."""

    ticker: str
    amount: int

    @validator('ticker')
    def _validate_ticker(cls, v):
        if v == '':
            # Fix: original message misspelled "field" as "filed".
            raise ValueError('ticker field must not be empty')
        return v

    @validator('amount')
    def _validate_amount(cls, v):
        # NOTE(review): this rejects a legitimate holding of exactly 0 shares
        # while silently accepting negative amounts — confirm intended semantics.
        if v == 0:
            raise ValueError('amount field must not be empty')
        return v
class MatchingDividendsParams(BaseModel):
    """Request payload for POST /dividends/match: the dividend entries to price."""

    dividends: List[DividendModel]
| {"/companies_matcher/app/routers/multiplicators.py": ["/companies_matcher/app/serializers.py"], "/companies_matcher/parsers/marketwatch_parser.py": ["/companies_matcher/parsers/abc.py"], "/companies_matcher/app/main.py": ["/companies_matcher/app/routers/__init__.py", "/companies_matcher/parsers/__init__.py"], "/companies_matcher/parsers/__init__.py": ["/companies_matcher/parsers/finviz_parser.py", "/companies_matcher/parsers/marketwatch_parser.py"], "/companies_matcher/parsers/marketwatch_reports.py": ["/companies_matcher/parsers/marketwatch_parser.py"], "/companies_matcher/app/routers/reports.py": ["/companies_matcher/app/serializers.py", "/companies_matcher/parsers/marketwatch_reports.py"], "/companies_matcher/parsers/finviz_dividends.py": ["/companies_matcher/parsers/finviz_parser.py"], "/companies_matcher/app/routers/__init__.py": ["/companies_matcher/app/routers/index.py", "/companies_matcher/app/routers/multiplicators.py", "/companies_matcher/app/routers/reports.py", "/companies_matcher/app/routers/dividends.py"], "/companies_matcher/app/routers/dividends.py": ["/companies_matcher/app/serializers.py", "/companies_matcher/parsers/finviz_dividends.py"], "/companies_matcher/parsers/finviz_parser.py": ["/companies_matcher/parsers/abc.py"]} |
45,726 | t0theheart/companies-matcher | refs/heads/master | /companies_matcher/parsers/abc.py | from abc import ABC, abstractmethod
class ParserABC(ABC):
    """Base class for per-ticker HTML scrapers.

    Subclasses must set ``self._tickers`` (an iterable of ticker symbols)
    and implement the two abstract hooks below.
    """

    @abstractmethod
    async def _request_html(self, ticker: str) -> str:
        """Fetch the raw HTML page for *ticker*."""

    @abstractmethod
    def _parse_html(self, html: str) -> dict:
        """Extract the data of interest from *html*."""

    async def _get_one(self, ticker: str) -> dict:
        """Fetch and parse a single ticker, returned keyed by its symbol."""
        html = await self._request_html(ticker)
        return {ticker: self._parse_html(html)}

    async def get_data(self) -> dict:
        """Fetch every ticker and merge the per-ticker dicts into one.

        Improvement: the original awaited each ticker sequentially, which
        serialised the network requests; gathering the coroutines overlaps
        the I/O while producing the same resulting mapping.
        """
        import asyncio  # local import: the module otherwise needs no asyncio

        partials = await asyncio.gather(
            *(self._get_one(ticker) for ticker in self._tickers)
        )
        result = {}
        for part in partials:
            result.update(part)
        return result
| {"/companies_matcher/app/routers/multiplicators.py": ["/companies_matcher/app/serializers.py"], "/companies_matcher/parsers/marketwatch_parser.py": ["/companies_matcher/parsers/abc.py"], "/companies_matcher/app/main.py": ["/companies_matcher/app/routers/__init__.py", "/companies_matcher/parsers/__init__.py"], "/companies_matcher/parsers/__init__.py": ["/companies_matcher/parsers/finviz_parser.py", "/companies_matcher/parsers/marketwatch_parser.py"], "/companies_matcher/parsers/marketwatch_reports.py": ["/companies_matcher/parsers/marketwatch_parser.py"], "/companies_matcher/app/routers/reports.py": ["/companies_matcher/app/serializers.py", "/companies_matcher/parsers/marketwatch_reports.py"], "/companies_matcher/parsers/finviz_dividends.py": ["/companies_matcher/parsers/finviz_parser.py"], "/companies_matcher/app/routers/__init__.py": ["/companies_matcher/app/routers/index.py", "/companies_matcher/app/routers/multiplicators.py", "/companies_matcher/app/routers/reports.py", "/companies_matcher/app/routers/dividends.py"], "/companies_matcher/app/routers/dividends.py": ["/companies_matcher/app/serializers.py", "/companies_matcher/parsers/finviz_dividends.py"], "/companies_matcher/parsers/finviz_parser.py": ["/companies_matcher/parsers/abc.py"]} |
45,727 | t0theheart/companies-matcher | refs/heads/master | /companies_matcher/app/routers/multiplicators.py | from fastapi import APIRouter, Request
from companies_matcher.app.serializers import MatchingMultiplicatorsParams
from companies_matcher.config import config
multiplicators_router = APIRouter()


@multiplicators_router.post("/match")
async def match_multiplicators(params: MatchingMultiplicatorsParams, request: Request):
    """Scrape the requested multiplicators for each ticker via the finviz parser."""
    # The parser class is attached to the app at startup (request.app.finviz).
    parser = request.app.finviz(tickers=params.tickers, multiplicators=params.multiplicators)
    matched = await parser.get_data()
    return {'result': matched}
@multiplicators_router.get("/list")
async def get_multiplicators_list(request: Request):
    """Expose the configured finviz multiplicator names to the frontend."""
    available = config['finviz']['multiplicators']
    return {'result': available}
| {"/companies_matcher/app/routers/multiplicators.py": ["/companies_matcher/app/serializers.py"], "/companies_matcher/parsers/marketwatch_parser.py": ["/companies_matcher/parsers/abc.py"], "/companies_matcher/app/main.py": ["/companies_matcher/app/routers/__init__.py", "/companies_matcher/parsers/__init__.py"], "/companies_matcher/parsers/__init__.py": ["/companies_matcher/parsers/finviz_parser.py", "/companies_matcher/parsers/marketwatch_parser.py"], "/companies_matcher/parsers/marketwatch_reports.py": ["/companies_matcher/parsers/marketwatch_parser.py"], "/companies_matcher/app/routers/reports.py": ["/companies_matcher/app/serializers.py", "/companies_matcher/parsers/marketwatch_reports.py"], "/companies_matcher/parsers/finviz_dividends.py": ["/companies_matcher/parsers/finviz_parser.py"], "/companies_matcher/app/routers/__init__.py": ["/companies_matcher/app/routers/index.py", "/companies_matcher/app/routers/multiplicators.py", "/companies_matcher/app/routers/reports.py", "/companies_matcher/app/routers/dividends.py"], "/companies_matcher/app/routers/dividends.py": ["/companies_matcher/app/serializers.py", "/companies_matcher/parsers/finviz_dividends.py"], "/companies_matcher/parsers/finviz_parser.py": ["/companies_matcher/parsers/abc.py"]} |
45,728 | t0theheart/companies-matcher | refs/heads/master | /companies_matcher/app/routers/index.py | from fastapi import APIRouter, Request
from fastapi.templating import Jinja2Templates
from fastapi.responses import HTMLResponse
index_router = APIRouter()
templates = Jinja2Templates(directory="companies_matcher/app/front/templates")


# NOTE(review): the handler name looks copy-pasted from the multiplicators
# router — it actually serves the index page. Left unchanged to keep the
# public symbol stable.
@index_router.get("/", response_class=HTMLResponse)
async def match_multiplicators(request: Request):
    """Render the single-page frontend template."""
    return templates.TemplateResponse("index.html", {"request": request})
| {"/companies_matcher/app/routers/multiplicators.py": ["/companies_matcher/app/serializers.py"], "/companies_matcher/parsers/marketwatch_parser.py": ["/companies_matcher/parsers/abc.py"], "/companies_matcher/app/main.py": ["/companies_matcher/app/routers/__init__.py", "/companies_matcher/parsers/__init__.py"], "/companies_matcher/parsers/__init__.py": ["/companies_matcher/parsers/finviz_parser.py", "/companies_matcher/parsers/marketwatch_parser.py"], "/companies_matcher/parsers/marketwatch_reports.py": ["/companies_matcher/parsers/marketwatch_parser.py"], "/companies_matcher/app/routers/reports.py": ["/companies_matcher/app/serializers.py", "/companies_matcher/parsers/marketwatch_reports.py"], "/companies_matcher/parsers/finviz_dividends.py": ["/companies_matcher/parsers/finviz_parser.py"], "/companies_matcher/app/routers/__init__.py": ["/companies_matcher/app/routers/index.py", "/companies_matcher/app/routers/multiplicators.py", "/companies_matcher/app/routers/reports.py", "/companies_matcher/app/routers/dividends.py"], "/companies_matcher/app/routers/dividends.py": ["/companies_matcher/app/serializers.py", "/companies_matcher/parsers/finviz_dividends.py"], "/companies_matcher/parsers/finviz_parser.py": ["/companies_matcher/parsers/abc.py"]} |
45,729 | t0theheart/companies-matcher | refs/heads/master | /companies_matcher/parsers/marketwatch_parser.py | from bs4 import BeautifulSoup
from companies_matcher.config import config
from .abc import ParserABC
import aiohttp
class MarketwatchParser(ParserABC):
    """Scrapes financial-report tables (income/balance/cash) from marketwatch."""

    _url = config['marketwatch']['url']
    _endpoint = config['marketwatch']['endpoint']
    _headers = {'User-Agent': config['service']['userAgent']}

    def __init__(self, report: str, tickers: list, topics: list):
        self._tickers = tickers
        self._topics = topics
        self._report = report

    @staticmethod
    def _parse_period(soup: BeautifulSoup):
        """Return the period labels from the table header row."""
        header = soup.find('thead', class_='table__header')
        cells = header.find_all('div', attrs={'class': 'cell__content'})
        # Skip the leading label cells and the trailing cell; this [2:-1]
        # slice mirrors the value slice used in _parse_html below.
        return [cell.text for cell in cells[2:-1]]

    @staticmethod
    def _combine_data_with_period(data: dict, period: list):
        """Pivot {topic: [values...]} into {period_label: {topic: value}}."""
        combined = {label: {} for label in period}
        for idx, label in enumerate(period):
            for topic in data.keys():
                combined[label].update({topic: data[topic][idx]})
        return combined

    async def _request_html(self, ticker: str):
        """Download the report page for *ticker*."""
        url = self._url + f'{ticker}/{self._endpoint}/{self._report}'
        async with aiohttp.ClientSession() as session:
            async with session.get(url, headers=self._headers) as resp:
                return await resp.text()

    def _parse_html(self, html: str):
        """Extract the requested topics from a report page, keyed by period."""
        extracted = dict()
        soup = BeautifulSoup(html, "html.parser")
        for row in soup.find_all('tr', attrs={'class': 'table__row'}):
            cells = row.find_all('div', class_='cell__content')
            if not cells:
                continue
            topic = cells[0].text
            if topic in self._topics:
                extracted[topic] = [cell.text for cell in cells[2:-1]]
        period = self._parse_period(soup)
        return self._combine_data_with_period(extracted, period)
| {"/companies_matcher/app/routers/multiplicators.py": ["/companies_matcher/app/serializers.py"], "/companies_matcher/parsers/marketwatch_parser.py": ["/companies_matcher/parsers/abc.py"], "/companies_matcher/app/main.py": ["/companies_matcher/app/routers/__init__.py", "/companies_matcher/parsers/__init__.py"], "/companies_matcher/parsers/__init__.py": ["/companies_matcher/parsers/finviz_parser.py", "/companies_matcher/parsers/marketwatch_parser.py"], "/companies_matcher/parsers/marketwatch_reports.py": ["/companies_matcher/parsers/marketwatch_parser.py"], "/companies_matcher/app/routers/reports.py": ["/companies_matcher/app/serializers.py", "/companies_matcher/parsers/marketwatch_reports.py"], "/companies_matcher/parsers/finviz_dividends.py": ["/companies_matcher/parsers/finviz_parser.py"], "/companies_matcher/app/routers/__init__.py": ["/companies_matcher/app/routers/index.py", "/companies_matcher/app/routers/multiplicators.py", "/companies_matcher/app/routers/reports.py", "/companies_matcher/app/routers/dividends.py"], "/companies_matcher/app/routers/dividends.py": ["/companies_matcher/app/serializers.py", "/companies_matcher/parsers/finviz_dividends.py"], "/companies_matcher/parsers/finviz_parser.py": ["/companies_matcher/parsers/abc.py"]} |
45,730 | t0theheart/companies-matcher | refs/heads/master | /companies_matcher/app/main.py | from fastapi import FastAPI
from companies_matcher.app.routers import index_router, multiplicators_router, reports_router, dividends_router
from companies_matcher.parsers import FinvizParser, MarketwatchParser
from fastapi.staticfiles import StaticFiles
app = FastAPI()
# Startup hook: exposes the parser classes on the app object so route
# handlers can reach them via request.app.finviz / request.app.marketwatch.
# NOTE(review): indentation was lost when this file was extracted into the
# dataset, so it is ambiguous whether the mount()/include_router() calls
# below execute inside init_app() or at module import time — confirm against
# the original repository before restructuring.
@app.on_event("startup")
async def init_app():
app.finviz = FinvizParser
app.marketwatch = MarketwatchParser
# Static asset mounts for the frontend (css / js / images directories).
app.mount("/static", StaticFiles(directory="companies_matcher/app/front/static/css"), name="static")
app.mount("/scripts", StaticFiles(directory="companies_matcher/app/front/static/js"), name="scripts")
app.mount("/images", StaticFiles(directory="companies_matcher/app/front/static/images"), name="images")
# API routers, each namespaced under its own URL prefix; the index router
# (no prefix) serves the frontend page.
app.include_router(multiplicators_router, prefix='/multiplicators')
app.include_router(reports_router, prefix='/reports')
app.include_router(dividends_router, prefix='/dividends')
app.include_router(index_router)
| {"/companies_matcher/app/routers/multiplicators.py": ["/companies_matcher/app/serializers.py"], "/companies_matcher/parsers/marketwatch_parser.py": ["/companies_matcher/parsers/abc.py"], "/companies_matcher/app/main.py": ["/companies_matcher/app/routers/__init__.py", "/companies_matcher/parsers/__init__.py"], "/companies_matcher/parsers/__init__.py": ["/companies_matcher/parsers/finviz_parser.py", "/companies_matcher/parsers/marketwatch_parser.py"], "/companies_matcher/parsers/marketwatch_reports.py": ["/companies_matcher/parsers/marketwatch_parser.py"], "/companies_matcher/app/routers/reports.py": ["/companies_matcher/app/serializers.py", "/companies_matcher/parsers/marketwatch_reports.py"], "/companies_matcher/parsers/finviz_dividends.py": ["/companies_matcher/parsers/finviz_parser.py"], "/companies_matcher/app/routers/__init__.py": ["/companies_matcher/app/routers/index.py", "/companies_matcher/app/routers/multiplicators.py", "/companies_matcher/app/routers/reports.py", "/companies_matcher/app/routers/dividends.py"], "/companies_matcher/app/routers/dividends.py": ["/companies_matcher/app/serializers.py", "/companies_matcher/parsers/finviz_dividends.py"], "/companies_matcher/parsers/finviz_parser.py": ["/companies_matcher/parsers/abc.py"]} |
45,731 | t0theheart/companies-matcher | refs/heads/master | /companies_matcher/parsers/__init__.py | from .finviz_parser import FinvizParser
from .marketwatch_parser import MarketwatchParser
| {"/companies_matcher/app/routers/multiplicators.py": ["/companies_matcher/app/serializers.py"], "/companies_matcher/parsers/marketwatch_parser.py": ["/companies_matcher/parsers/abc.py"], "/companies_matcher/app/main.py": ["/companies_matcher/app/routers/__init__.py", "/companies_matcher/parsers/__init__.py"], "/companies_matcher/parsers/__init__.py": ["/companies_matcher/parsers/finviz_parser.py", "/companies_matcher/parsers/marketwatch_parser.py"], "/companies_matcher/parsers/marketwatch_reports.py": ["/companies_matcher/parsers/marketwatch_parser.py"], "/companies_matcher/app/routers/reports.py": ["/companies_matcher/app/serializers.py", "/companies_matcher/parsers/marketwatch_reports.py"], "/companies_matcher/parsers/finviz_dividends.py": ["/companies_matcher/parsers/finviz_parser.py"], "/companies_matcher/app/routers/__init__.py": ["/companies_matcher/app/routers/index.py", "/companies_matcher/app/routers/multiplicators.py", "/companies_matcher/app/routers/reports.py", "/companies_matcher/app/routers/dividends.py"], "/companies_matcher/app/routers/dividends.py": ["/companies_matcher/app/serializers.py", "/companies_matcher/parsers/finviz_dividends.py"], "/companies_matcher/parsers/finviz_parser.py": ["/companies_matcher/parsers/abc.py"]} |
45,732 | t0theheart/companies-matcher | refs/heads/master | /companies_matcher/parsers/marketwatch_reports.py | from .marketwatch_parser import MarketwatchParser
import asyncio
from collections import defaultdict
def _combine_results(results: list, tickers: list):
combined_result = {ticker: defaultdict(dict) for ticker in tickers}
period = set()
for ticker in tickers:
for report in results:
for year in report[ticker].keys():
combined_result[ticker][year].update(report[ticker][year])
period.add(year)
return {'data': combined_result, 'period': sorted(list(period))}
async def get_reports(reports: list, tickers: list, topics: list):
    """Fetch every requested report concurrently and merge the results."""
    tasks = [
        MarketwatchParser(report=report, tickers=tickers, topics=topics).get_data()
        for report in reports
    ]
    per_report = await asyncio.gather(*tasks)
    return _combine_results(per_report, tickers)
| {"/companies_matcher/app/routers/multiplicators.py": ["/companies_matcher/app/serializers.py"], "/companies_matcher/parsers/marketwatch_parser.py": ["/companies_matcher/parsers/abc.py"], "/companies_matcher/app/main.py": ["/companies_matcher/app/routers/__init__.py", "/companies_matcher/parsers/__init__.py"], "/companies_matcher/parsers/__init__.py": ["/companies_matcher/parsers/finviz_parser.py", "/companies_matcher/parsers/marketwatch_parser.py"], "/companies_matcher/parsers/marketwatch_reports.py": ["/companies_matcher/parsers/marketwatch_parser.py"], "/companies_matcher/app/routers/reports.py": ["/companies_matcher/app/serializers.py", "/companies_matcher/parsers/marketwatch_reports.py"], "/companies_matcher/parsers/finviz_dividends.py": ["/companies_matcher/parsers/finviz_parser.py"], "/companies_matcher/app/routers/__init__.py": ["/companies_matcher/app/routers/index.py", "/companies_matcher/app/routers/multiplicators.py", "/companies_matcher/app/routers/reports.py", "/companies_matcher/app/routers/dividends.py"], "/companies_matcher/app/routers/dividends.py": ["/companies_matcher/app/serializers.py", "/companies_matcher/parsers/finviz_dividends.py"], "/companies_matcher/parsers/finviz_parser.py": ["/companies_matcher/parsers/abc.py"]} |
45,733 | t0theheart/companies-matcher | refs/heads/master | /companies_matcher/app/routers/reports.py | from fastapi import APIRouter, Request
from companies_matcher.app.serializers import MatchingReportsParams
from companies_matcher.parsers.marketwatch_reports import get_reports
from companies_matcher.config import config
reports_router = APIRouter()


@reports_router.post("/match")
async def match_reports(params: MatchingReportsParams, request: Request):
    """Scrape and merge the requested report topics for each ticker."""
    matched = await get_reports(
        reports=params.reports, tickers=params.tickers, topics=params.topics
    )
    return {'result': matched}
@reports_router.get("/income/topics")
async def get_income_topics(request: Request):
    """List the configured income-statement topics."""
    topics = config['marketwatch']['incomeTopics']
    return {'result': topics}
@reports_router.get("/balance/topics")
async def get_balance_topics(request: Request):
    """List the configured balance-sheet topics."""
    topics = config['marketwatch']['balanceTopics']
    return {'result': topics}
@reports_router.get("/cash/topics")
async def get_cash_topics(request: Request):
    """List the configured cash-flow topics."""
    topics = config['marketwatch']['cashTopics']
    return {'result': topics}
| {"/companies_matcher/app/routers/multiplicators.py": ["/companies_matcher/app/serializers.py"], "/companies_matcher/parsers/marketwatch_parser.py": ["/companies_matcher/parsers/abc.py"], "/companies_matcher/app/main.py": ["/companies_matcher/app/routers/__init__.py", "/companies_matcher/parsers/__init__.py"], "/companies_matcher/parsers/__init__.py": ["/companies_matcher/parsers/finviz_parser.py", "/companies_matcher/parsers/marketwatch_parser.py"], "/companies_matcher/parsers/marketwatch_reports.py": ["/companies_matcher/parsers/marketwatch_parser.py"], "/companies_matcher/app/routers/reports.py": ["/companies_matcher/app/serializers.py", "/companies_matcher/parsers/marketwatch_reports.py"], "/companies_matcher/parsers/finviz_dividends.py": ["/companies_matcher/parsers/finviz_parser.py"], "/companies_matcher/app/routers/__init__.py": ["/companies_matcher/app/routers/index.py", "/companies_matcher/app/routers/multiplicators.py", "/companies_matcher/app/routers/reports.py", "/companies_matcher/app/routers/dividends.py"], "/companies_matcher/app/routers/dividends.py": ["/companies_matcher/app/serializers.py", "/companies_matcher/parsers/finviz_dividends.py"], "/companies_matcher/parsers/finviz_parser.py": ["/companies_matcher/parsers/abc.py"]} |
45,734 | t0theheart/companies-matcher | refs/heads/master | /companies_matcher/parsers/finviz_dividends.py | from companies_matcher.parsers.finviz_parser import FinvizParser
_multiplicator = 'Dividend'
def _join_result(data: list, dividends: dict):
for item in data:
t = item['ticker']
try:
div = dividends[t][_multiplicator]
item['total'] = round(float(div) * item['amount'], 2)
item['dividends'] = round(float(div), 2)
except KeyError:
item['total'] = 0
item['dividends'] = 0
except ValueError:
item['total'] = 0
item['dividends'] = 0
async def get_dividends(data: list):
    """Scrape per-share dividends for every ticker in *data* and price them.

    Mutates and returns *data*: each item gains 'dividends' and 'total' keys
    (see _join_result).
    """
    tickers = [entry['ticker'] for entry in data]
    scraped = await FinvizParser(tickers, [_multiplicator]).get_data()
    _join_result(data, scraped)
    return data
| {"/companies_matcher/app/routers/multiplicators.py": ["/companies_matcher/app/serializers.py"], "/companies_matcher/parsers/marketwatch_parser.py": ["/companies_matcher/parsers/abc.py"], "/companies_matcher/app/main.py": ["/companies_matcher/app/routers/__init__.py", "/companies_matcher/parsers/__init__.py"], "/companies_matcher/parsers/__init__.py": ["/companies_matcher/parsers/finviz_parser.py", "/companies_matcher/parsers/marketwatch_parser.py"], "/companies_matcher/parsers/marketwatch_reports.py": ["/companies_matcher/parsers/marketwatch_parser.py"], "/companies_matcher/app/routers/reports.py": ["/companies_matcher/app/serializers.py", "/companies_matcher/parsers/marketwatch_reports.py"], "/companies_matcher/parsers/finviz_dividends.py": ["/companies_matcher/parsers/finviz_parser.py"], "/companies_matcher/app/routers/__init__.py": ["/companies_matcher/app/routers/index.py", "/companies_matcher/app/routers/multiplicators.py", "/companies_matcher/app/routers/reports.py", "/companies_matcher/app/routers/dividends.py"], "/companies_matcher/app/routers/dividends.py": ["/companies_matcher/app/serializers.py", "/companies_matcher/parsers/finviz_dividends.py"], "/companies_matcher/parsers/finviz_parser.py": ["/companies_matcher/parsers/abc.py"]} |
45,735 | t0theheart/companies-matcher | refs/heads/master | /companies_matcher/writer/writer.py | import xlwt
def write_to_xlsx(data: dict, column_headers: list, rows_headers: list):
    """Write a 2-D matching table to 'matching.xlsx'.

    *data* is indexed as data[column_header][rows_header]; column headers go
    on row 0, row headers in column 0, and the values fill the body.

    NOTE(review): xlwt emits the legacy binary .xls format, so the file saved
    here carries a misleading .xlsx extension — confirm the download flow
    tolerates this before renaming anything (the name is kept to preserve
    behavior).
    """
    book = xlwt.Workbook(encoding="utf-8")
    # cell_overwrite_ok: row headers are re-written once per column below.
    sheet = book.add_sheet("Matching", cell_overwrite_ok=True)
    for col, column_header in enumerate(column_headers, start=1):
        sheet.write(0, col, column_header)
        for row, rows_header in enumerate(rows_headers, start=1):
            sheet.write(row, 0, rows_header)
            sheet.write(row, col, data[column_header][rows_header])
    book.save("matching.xlsx")
| {"/companies_matcher/app/routers/multiplicators.py": ["/companies_matcher/app/serializers.py"], "/companies_matcher/parsers/marketwatch_parser.py": ["/companies_matcher/parsers/abc.py"], "/companies_matcher/app/main.py": ["/companies_matcher/app/routers/__init__.py", "/companies_matcher/parsers/__init__.py"], "/companies_matcher/parsers/__init__.py": ["/companies_matcher/parsers/finviz_parser.py", "/companies_matcher/parsers/marketwatch_parser.py"], "/companies_matcher/parsers/marketwatch_reports.py": ["/companies_matcher/parsers/marketwatch_parser.py"], "/companies_matcher/app/routers/reports.py": ["/companies_matcher/app/serializers.py", "/companies_matcher/parsers/marketwatch_reports.py"], "/companies_matcher/parsers/finviz_dividends.py": ["/companies_matcher/parsers/finviz_parser.py"], "/companies_matcher/app/routers/__init__.py": ["/companies_matcher/app/routers/index.py", "/companies_matcher/app/routers/multiplicators.py", "/companies_matcher/app/routers/reports.py", "/companies_matcher/app/routers/dividends.py"], "/companies_matcher/app/routers/dividends.py": ["/companies_matcher/app/serializers.py", "/companies_matcher/parsers/finviz_dividends.py"], "/companies_matcher/parsers/finviz_parser.py": ["/companies_matcher/parsers/abc.py"]} |
45,736 | t0theheart/companies-matcher | refs/heads/master | /companies_matcher/app/routers/__init__.py | from .index import index_router
from .multiplicators import multiplicators_router
from .reports import reports_router
from .dividends import dividends_router
| {"/companies_matcher/app/routers/multiplicators.py": ["/companies_matcher/app/serializers.py"], "/companies_matcher/parsers/marketwatch_parser.py": ["/companies_matcher/parsers/abc.py"], "/companies_matcher/app/main.py": ["/companies_matcher/app/routers/__init__.py", "/companies_matcher/parsers/__init__.py"], "/companies_matcher/parsers/__init__.py": ["/companies_matcher/parsers/finviz_parser.py", "/companies_matcher/parsers/marketwatch_parser.py"], "/companies_matcher/parsers/marketwatch_reports.py": ["/companies_matcher/parsers/marketwatch_parser.py"], "/companies_matcher/app/routers/reports.py": ["/companies_matcher/app/serializers.py", "/companies_matcher/parsers/marketwatch_reports.py"], "/companies_matcher/parsers/finviz_dividends.py": ["/companies_matcher/parsers/finviz_parser.py"], "/companies_matcher/app/routers/__init__.py": ["/companies_matcher/app/routers/index.py", "/companies_matcher/app/routers/multiplicators.py", "/companies_matcher/app/routers/reports.py", "/companies_matcher/app/routers/dividends.py"], "/companies_matcher/app/routers/dividends.py": ["/companies_matcher/app/serializers.py", "/companies_matcher/parsers/finviz_dividends.py"], "/companies_matcher/parsers/finviz_parser.py": ["/companies_matcher/parsers/abc.py"]} |
45,737 | t0theheart/companies-matcher | refs/heads/master | /companies_matcher/app/routers/dividends.py | from fastapi import APIRouter
from companies_matcher.app.serializers import MatchingDividendsParams
from companies_matcher.parsers.finviz_dividends import get_dividends
dividends_router = APIRouter()


@dividends_router.post("/match")
async def match_dividends(params: MatchingDividendsParams):
    """Price the submitted dividend positions against scraped payouts."""
    entries = params.dict()['dividends']
    priced = await get_dividends(entries)
    return {'result': priced}
| {"/companies_matcher/app/routers/multiplicators.py": ["/companies_matcher/app/serializers.py"], "/companies_matcher/parsers/marketwatch_parser.py": ["/companies_matcher/parsers/abc.py"], "/companies_matcher/app/main.py": ["/companies_matcher/app/routers/__init__.py", "/companies_matcher/parsers/__init__.py"], "/companies_matcher/parsers/__init__.py": ["/companies_matcher/parsers/finviz_parser.py", "/companies_matcher/parsers/marketwatch_parser.py"], "/companies_matcher/parsers/marketwatch_reports.py": ["/companies_matcher/parsers/marketwatch_parser.py"], "/companies_matcher/app/routers/reports.py": ["/companies_matcher/app/serializers.py", "/companies_matcher/parsers/marketwatch_reports.py"], "/companies_matcher/parsers/finviz_dividends.py": ["/companies_matcher/parsers/finviz_parser.py"], "/companies_matcher/app/routers/__init__.py": ["/companies_matcher/app/routers/index.py", "/companies_matcher/app/routers/multiplicators.py", "/companies_matcher/app/routers/reports.py", "/companies_matcher/app/routers/dividends.py"], "/companies_matcher/app/routers/dividends.py": ["/companies_matcher/app/serializers.py", "/companies_matcher/parsers/finviz_dividends.py"], "/companies_matcher/parsers/finviz_parser.py": ["/companies_matcher/parsers/abc.py"]} |
45,738 | t0theheart/companies-matcher | refs/heads/master | /companies_matcher/parsers/finviz_parser.py | from bs4 import BeautifulSoup
from companies_matcher.config import config
from .abc import ParserABC
import aiohttp
class FinvizParser(ParserABC):
    """Scrapes multiplicator values from finviz ticker snapshot pages."""

    _url = config['finviz']['url']
    _headers = {'User-Agent': config['service']['userAgent']}

    def __init__(self, tickers: list, multiplicators: list):
        self._tickers = tickers
        self._multiplicators = multiplicators

    async def _request_html(self, ticker: str):
        """Download the finviz snapshot page for *ticker*."""
        async with aiohttp.ClientSession() as session:
            async with session.get(self._url, params={'t': ticker}, headers=self._headers) as resp:
                return await resp.text()

    def _parse_html(self, html: str):
        """Return {multiplicator: value} for every requested multiplicator found."""
        soup = BeautifulSoup(html, "html.parser")
        parsed = dict()
        # In the snapshot table each label cell is immediately followed by
        # its value cell, hence find_next() for the value.
        for cell in soup.find_all('td', class_='snapshot-td2-cp'):
            label = cell.text
            if label in self._multiplicators:
                parsed[label] = cell.find_next().text
        return parsed
| {"/companies_matcher/app/routers/multiplicators.py": ["/companies_matcher/app/serializers.py"], "/companies_matcher/parsers/marketwatch_parser.py": ["/companies_matcher/parsers/abc.py"], "/companies_matcher/app/main.py": ["/companies_matcher/app/routers/__init__.py", "/companies_matcher/parsers/__init__.py"], "/companies_matcher/parsers/__init__.py": ["/companies_matcher/parsers/finviz_parser.py", "/companies_matcher/parsers/marketwatch_parser.py"], "/companies_matcher/parsers/marketwatch_reports.py": ["/companies_matcher/parsers/marketwatch_parser.py"], "/companies_matcher/app/routers/reports.py": ["/companies_matcher/app/serializers.py", "/companies_matcher/parsers/marketwatch_reports.py"], "/companies_matcher/parsers/finviz_dividends.py": ["/companies_matcher/parsers/finviz_parser.py"], "/companies_matcher/app/routers/__init__.py": ["/companies_matcher/app/routers/index.py", "/companies_matcher/app/routers/multiplicators.py", "/companies_matcher/app/routers/reports.py", "/companies_matcher/app/routers/dividends.py"], "/companies_matcher/app/routers/dividends.py": ["/companies_matcher/app/serializers.py", "/companies_matcher/parsers/finviz_dividends.py"], "/companies_matcher/parsers/finviz_parser.py": ["/companies_matcher/parsers/abc.py"]} |
45,802 | hyeseong-dev/20210403-assignment | refs/heads/main | /linewalks/migrations/0001_initial.py | # Generated by Django 3.1.7 on 2021-04-03 01:00
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the linewalks app.

    Creates the OMOP-CDM-style tables (concept, person, visit_occurrence,
    drug_exposure, death, condition_occurrence).  Every model pins an explicit
    ``db_table`` so the ORM maps onto the pre-existing database table names.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        # Vocabulary lookup table; primary key comes from the source data.
        migrations.CreateModel(
            name='Concept',
            fields=[
                ('concept_id', models.IntegerField(primary_key=True, serialize=False)),
                ('concept_name', models.CharField(blank=True, max_length=255, null=True)),
                ('domain_id', models.CharField(blank=True, max_length=20, null=True)),
                ('vocabulary_id', models.CharField(blank=True, max_length=20, null=True)),
                ('concept_class_id', models.CharField(blank=True, max_length=20, null=True)),
                ('standard_concept', models.CharField(blank=True, max_length=1, null=True)),
                ('concept_code', models.CharField(blank=True, max_length=50, null=True)),
                ('valid_start_date', models.DateField(blank=True, null=True)),
                ('valid_end_date', models.DateField(blank=True, null=True)),
                ('invalid_reason', models.CharField(blank=True, max_length=1, null=True)),
            ],
            options={
                'db_table': 'concept',
            },
        ),
        # Patient demographics.
        migrations.CreateModel(
            name='Person',
            fields=[
                ('person_id', models.BigIntegerField(primary_key=True, serialize=False)),
                ('gender_concept_id', models.IntegerField(blank=True, null=True)),
                ('year_of_birth', models.IntegerField(blank=True, null=True)),
                ('month_of_birth', models.IntegerField(blank=True, null=True)),
                ('day_of_birth', models.IntegerField(blank=True, null=True)),
                ('birth_datetime', models.DateTimeField(blank=True, null=True)),
                ('race_concept_id', models.IntegerField(blank=True, null=True)),
                ('ethnicity_concept_id', models.IntegerField(blank=True, null=True)),
                ('location_id', models.BigIntegerField(blank=True, null=True)),
                ('provider_id', models.BigIntegerField(blank=True, null=True)),
                ('care_site_id', models.BigIntegerField(blank=True, null=True)),
                ('person_source_value', models.CharField(blank=True, max_length=50, null=True)),
                ('gender_source_value', models.CharField(blank=True, max_length=50, null=True)),
                ('gender_source_concept_id', models.IntegerField(blank=True, null=True)),
                ('race_source_value', models.CharField(blank=True, max_length=50, null=True)),
                ('race_source_concept_id', models.IntegerField(blank=True, null=True)),
                ('ethnicity_source_value', models.CharField(blank=True, max_length=50, null=True)),
                ('ethnicity_source_concept_id', models.IntegerField(blank=True, null=True)),
            ],
            options={
                'db_table': 'person',
            },
        ),
        # One row per hospital visit; FK to Person.
        migrations.CreateModel(
            name='VisitOccurrence',
            fields=[
                ('visit_occurrence_id', models.BigIntegerField(primary_key=True, serialize=False)),
                ('visit_concept_id', models.IntegerField(blank=True, null=True)),
                ('visit_start_date', models.DateField(blank=True, null=True)),
                ('visit_start_datetime', models.DateTimeField(blank=True, null=True)),
                ('visit_end_date', models.DateField(blank=True, null=True)),
                ('visit_end_datetime', models.DateTimeField(blank=True, null=True)),
                ('visit_type_concept_id', models.IntegerField(blank=True, null=True)),
                ('provider_id', models.BigIntegerField(blank=True, null=True)),
                ('care_site_id', models.BigIntegerField(blank=True, null=True)),
                ('visit_source_value', models.CharField(blank=True, max_length=50, null=True)),
                ('visit_source_concept_id', models.IntegerField(blank=True, null=True)),
                ('admitted_from_concept_id', models.IntegerField(blank=True, null=True)),
                ('admitted_from_source_value', models.CharField(blank=True, max_length=50, null=True)),
                ('discharge_to_source_value', models.CharField(blank=True, max_length=50, null=True)),
                ('discharge_to_concept_id', models.IntegerField(blank=True, null=True)),
                ('preceding_visit_occurrence_id', models.BigIntegerField(blank=True, null=True)),
                ('person', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='visit_occurrence', to='linewalks.person')),
            ],
            options={
                'db_table': 'visit_occurrence',
            },
        ),
        # Drug prescriptions/administrations; FKs to Person and VisitOccurrence.
        migrations.CreateModel(
            name='DrugExposure',
            fields=[
                ('drug_exposure_id', models.BigIntegerField(primary_key=True, serialize=False)),
                ('drug_concept_id', models.IntegerField(blank=True, null=True)),
                ('drug_exposure_start_date', models.DateField(blank=True, null=True)),
                ('drug_exposure_start_datetime', models.DateTimeField(blank=True, null=True)),
                ('drug_exposure_end_date', models.DateField(blank=True, null=True)),
                ('drug_exposure_end_datetime', models.DateTimeField(blank=True, null=True)),
                ('verbatim_end_date', models.DateField(blank=True, null=True)),
                ('drug_type_concept_id', models.IntegerField(blank=True, null=True)),
                ('stop_reason', models.CharField(blank=True, max_length=20, null=True)),
                ('refills', models.IntegerField(blank=True, null=True)),
                # NOTE(review): 65535/65535 looks like inspectdb output for an
                # unconstrained PostgreSQL numeric column — confirm before reuse.
                ('quantity', models.DecimalField(blank=True, decimal_places=65535, max_digits=65535, null=True)),
                ('days_supply', models.IntegerField(blank=True, null=True)),
                ('sig', models.TextField(blank=True, null=True)),
                ('route_concept_id', models.IntegerField(blank=True, null=True)),
                ('lot_number', models.CharField(blank=True, max_length=50, null=True)),
                ('provider_id', models.BigIntegerField(blank=True, null=True)),
                ('visit_detail_id', models.BigIntegerField(blank=True, null=True)),
                ('drug_source_value', models.CharField(blank=True, max_length=50, null=True)),
                ('drug_source_concept_id', models.IntegerField(blank=True, null=True)),
                ('route_source_value', models.CharField(blank=True, max_length=50, null=True)),
                ('dose_unit_source_value', models.CharField(blank=True, max_length=50, null=True)),
                ('person', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='drug_exposure', to='linewalks.person')),
                ('visit_occurrence', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='drug_exposure', to='linewalks.visitoccurrence')),
            ],
            options={
                'db_table': 'drug_exposure',
            },
        ),
        # Death records; surrogate auto PK (no natural key in source data).
        migrations.CreateModel(
            name='Death',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('death_date', models.DateField(blank=True, null=True)),
                ('death_datetime', models.DateTimeField(blank=True, null=True)),
                ('death_type_concept_id', models.IntegerField(blank=True, null=True)),
                ('cause_concept_id', models.BigIntegerField(blank=True, null=True)),
                ('cause_source_value', models.IntegerField(blank=True, null=True)),
                ('cause_source_concept_id', models.BigIntegerField(blank=True, null=True)),
                ('person', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='death', to='linewalks.person')),
            ],
            options={
                'db_table': 'death',
            },
        ),
        # Diagnoses/conditions; FKs to Person and VisitOccurrence.
        migrations.CreateModel(
            name='ConditionOccurrence',
            fields=[
                ('condition_occurrence_id', models.BigIntegerField(primary_key=True, serialize=False)),
                ('condition_concept_id', models.IntegerField(blank=True, null=True)),
                ('condition_start_date', models.DateField(blank=True, null=True)),
                ('condition_start_datetime', models.DateTimeField(blank=True, null=True)),
                ('condition_end_date', models.DateField(blank=True, null=True)),
                ('condition_end_datetime', models.DateTimeField(blank=True, null=True)),
                ('condition_type_concept_id', models.IntegerField(blank=True, null=True)),
                ('condition_status_concept_id', models.IntegerField(blank=True, null=True)),
                ('stop_reason', models.CharField(blank=True, max_length=20, null=True)),
                ('provider_id', models.BigIntegerField(blank=True, null=True)),
                ('visit_detail_id', models.BigIntegerField(blank=True, null=True)),
                ('condition_source_value', models.CharField(blank=True, max_length=50, null=True)),
                ('condition_source_concept_id', models.IntegerField(blank=True, null=True)),
                ('condition_status_source_value', models.CharField(blank=True, max_length=50, null=True)),
                ('person', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='condition_occurrence', to='linewalks.person')),
                ('visit_occurrence', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='condition_occurrence', to='linewalks.visitoccurrence')),
            ],
            options={
                'db_table': 'condition_occurrence',
            },
        ),
    ]
| {"/linewalks/views.py": ["/linewalks/models.py"]} |
45,803 | hyeseong-dev/20210403-assignment | refs/heads/main | /linewalks/views.py | import re
import json
from math import ceil
from django.http import JsonResponse
from django.views import View
from django.db.models import Q
from django.db import IntegrityError
from decorator import query_debugger
from linewalks.models import (
Person,
VisitOccurrence,
ConditionOccurrence,
Concept,
Death,
DrugExposure
)
class PatientView(View):
    """
    성, 인종, 민족 환자수를 분류하여 해당 카테고리의 환자 수를 제공함은 물론 전체 환자 수와 사망자 수에 대한 정보 역시 제공하는 API
    """
    @query_debugger
    def get(self, request):
        """Return patient statistics (total / gender / race / ethnicity / deaths) as JSON."""
        patients = Person.objects.all().\
            values('gender_concept_id', 'race_source_value', 'ethnicity_source_value')

        def counted(qs):
            # Format a count with thousands separators and the '명' suffix.
            return '{:,}'.format(qs.count()) + '명'

        results = {
            '전체 환자 수': counted(patients),
            '성별 환자 수': {
                # OMOP gender concept ids: 8507 = male, 8532 = female.
                '남성': counted(patients.filter(gender_concept_id=8507)),
                '여성': counted(patients.filter(gender_concept_id=8532)),
            },
            '인종별 환자 수': {
                '아시아인': counted(patients.filter(race_source_value='asian')),
                '흑인': counted(patients.filter(race_source_value='black')),
                '백인': counted(patients.filter(race_source_value='white')),
            },
            '민족별 환자 수': {
                '히스패닉': counted(patients.filter(ethnicity_source_value='hispanic')),
                '비히스패닉': counted(patients.filter(ethnicity_source_value='nonhispanic')),
            },
            '사망 환자 수': counted(Death.objects),
        }
        if results:
            return JsonResponse({'결과': results}, status=200)
        return JsonResponse({'message': 'INVALID_REQUEST'}, status=400)
class VisitView(View):
    """
    방문 유형(입원/외래/응급), 성, 민족, 연령대로 분류하여 방문자수 정보를 제공하는 API
    """
    @query_debugger
    def get(self, request):
        """Return visit statistics grouped by visit type, gender, ethnicity and age band."""
        # select_related avoids one extra query per person__* filter chain.
        queryset = VisitOccurrence.objects.select_related('person')
        results = {
            '방문유형' : {
                # OMOP visit concept ids: 9201=입원(inpatient), 9202=외래(outpatient), 9203=응급(ER)
                '입원' : '{:,}'.format(queryset.filter(visit_concept_id=9201).count())+'명',
                '외래' : '{:,}'.format(queryset.filter(visit_concept_id=9202).count())+'명',
                '응급' : '{:,}'.format(queryset.filter(visit_concept_id=9203).count())+'명',
            },
            '성별 방문 수' : {
                '남성' : '{:,}'.format(queryset.filter(person__gender_concept_id=8507).count())+'명',
                '여성' : '{:,}'.format(queryset.filter(person__gender_concept_id=8532).count())+'명',
            },
            # BUGFIX: this bucket filters on ethnicity_source_value (hispanic /
            # nonhispanic), i.e. ethnicity(민족), not race(인종). Renamed the key
            # from '인종별 방문 수' to '민족별 방문 수' for consistency with PatientView.
            '민족별 방문 수' : {
                '히스패닉' : '{:,}'.format(queryset.filter(person__ethnicity_source_value='hispanic').count())+'명',
                '비히스패닉' : '{:,}'.format(queryset.filter(person__ethnicity_source_value='nonhispanic').count())+'명',
            },
            '연령대별 방문 수' : {
                # idx=1 maps key=2011 to label '10~19' with birth years 2002..2011,
                # i.e. age bands relative to ~2021. NOTE(review): the reference
                # year is hard-coded via range(2011, 1921, -10) — confirm intent.
                f'{idx*10}~{idx*10+9}' : '{:,}'\
                    .format(queryset.filter(person__year_of_birth__range=(key-9, key)).count())+'명'
                for idx, key in enumerate(list(range(2011, 1921, -10)), 1)
            }
        }
        if results:
            return JsonResponse({'결과': results}, status=200)
        return JsonResponse({'message': 'INVALID_REQUEST'}, status=400)
class ConceptListView(View):
    '''
    concept_id의 정보를 얻을 수 있는 API입니다.
    - 쿼리 파라미터를 이용
      + 검색 기능
      + 키워드 검색
    '''
    @query_debugger
    def get(self, request):
        """Return one page (50 rows each) of concepts, persons, drugs and conditions."""
        PAGE_SIZE = 50
        page_no = int(request.GET.get('page', 1))
        # Half-open slice [lower, upper) for the requested page.
        upper = page_no * PAGE_SIZE
        lower = upper - PAGE_SIZE

        concept_qs = Concept.objects.values('concept_id', 'concept_name')
        person_qs = Person.objects.values('gender_concept_id', 'ethnicity_source_value')
        drug_qs = DrugExposure.objects.values('drug_concept_id')
        condition_qs = ConditionOccurrence.objects.values('condition_concept_id')

        results = {
            'persons': [
                {'gender_concept_id': row['gender_concept_id'],
                 'ethnicity_source_value': row['ethnicity_source_value']}
                for row in person_qs[lower:upper]
            ],
            'concepts': [
                {'concept_id': row['concept_id'],
                 'concept_name': row['concept_name']}
                for row in concept_qs[lower:upper]
            ],
            'conditions': [
                {'condition_concept_id': row['condition_concept_id']}
                for row in condition_qs[lower:upper]
            ],
            'drugs': [
                {'drug_concept_id': row['drug_concept_id']}
                for row in drug_qs[lower:upper]
            ],
        }
        if results:
            return JsonResponse({
                "페이지": f'{page_no} / {ceil(concept_qs.count()/PAGE_SIZE)}',
                '총 조회 건수': {
                    'people': '{:,}'.format(person_qs.count()) + ' 건',
                    'concepts': '{:,}'.format(concept_qs.count()) + ' 건',
                    'drugs': '{:,}'.format(drug_qs.count()) + ' 건',
                    'conditions': '{:,}'.format(condition_qs.count()) + ' 건',
                },
                '결과': results,
            }, status=200)
        return JsonResponse({'message': 'INVALID_REQUEST'}, status=400)
class SearchView(View):
    """
    각 테이블의 row를 조회하는 API를 구현합니다.
    - concept id와 concept name 매칭
      + concept의 의미를 알 수 있게 이름을 함께 return합니다.
    - Pagination 기능
    - 특정 컬럼 검색 기능
      + 키워드 검색
    """
    @query_debugger
    def get(self, request):
        """Search rows across person/visit/condition/drug tables by query parameters.

        Each table is filtered by an OR of its searchable columns and the
        matching page (50 rows) per table is returned.
        """
        page = int(request.GET.get('page', 1))
        PAGE_SIZE = 50
        limit = page * PAGE_SIZE
        offset = limit - PAGE_SIZE
        results = {}
        # NOTE(review): when a query parameter is absent, request.GET.get(...)
        # is None, so Q(col=None) matches rows where that column IS NULL —
        # confirm this is the intended behaviour.
        people = Person.objects.filter(
            Q(person_id = request.GET.get('person_id'))|
            Q(gender_concept_id = request.GET.get('gender_concept_id'))|
            Q(race_concept_id = request.GET.get('race_concept_id'))|
            Q(ethnicity_source_value = request.GET.get('ethnicity_source_value'))|
            Q(birth_datetime = request.GET.get('birth_datetime'))
        )
        visit_occurrences = VisitOccurrence.objects.filter(
            Q(person_id = request.GET.get('person_id'))|
            Q(visit_occurrence_id = request.GET.get('visit_occurrence_id'))|
            Q(visit_concept_id = request.GET.get('visit_concept_id'))|
            Q(visit_start_datetime = request.GET.get('visit_start_datetime'))|
            Q(visit_end_datetime = request.GET.get('visit_end_datetime'))
        )
        condition_occurrences = ConditionOccurrence.objects.filter(
            Q(person_id = request.GET.get('person_id'))|
            Q(condition_occurrence_id = request.GET.get('condition_occurrence_id'))|
            Q(condition_concept_id = request.GET.get('condition_concept_id'))|
            Q(condition_start_datetime = request.GET.get('condition_start_datetime'))|
            Q(condition_end_datetime = request.GET.get('condition_end_datetime'))
        )
        # BUGFIX: the drug queryset was sliced here AND again below, so from
        # page 2 onward the second slice cut into an already-sliced queryset
        # and returned no rows. Slice once, at the point of use, like the others.
        drug_exposures = DrugExposure.objects.filter(
            Q(person_id = request.GET.get('person_id'))|
            Q(drug_concept_id = request.GET.get('drug_concept_id'))|
            Q(visit_occurrence_id = request.GET.get('visit_occurrence_id'))|
            Q(drug_exposure_start_datetime = request.GET.get('drug_exposure_start_datetime'))|
            Q(drug_exposure_end_datetime = request.GET.get('drug_exposure_end_datetime'))
        )
        results['people'] = [{
            'person_id' : p.person_id,
            'gender_concept_id' : p.gender_concept_id,
            'birth_datetime' : p.birth_datetime,
            'race_concept_id' : p.race_concept_id,
            'ethnicity_concept_id' : p.ethnicity_concept_id,
        } for p in people[offset:limit]]
        results['visit_occurrences'] = [{
            'person_id' : v.person_id,
            'visit_occurrence_id' : v.visit_occurrence_id,
            'visit_concept_id' : v.visit_concept_id,
            'visit_start_datetime' : v.visit_start_datetime,
            'visit_end_datetime' : v.visit_end_datetime,
        } for v in visit_occurrences[offset:limit]]
        results['condition_occurrences'] = [{
            'person_id' : c.person_id,
            'condition_concept_id' : c.condition_concept_id,
            'condition_start_datetime' : c.condition_start_datetime,
            'condition_end_datetime' : c.condition_end_datetime,
            'visit_occurrence_id' : c.visit_occurrence_id,
        } for c in condition_occurrences[offset:limit]]
        results['drug_exposures'] = [{
            'person_id' : d.person_id,
            'drug_concept_id' : d.drug_concept_id,
            'drug_exposure_start_datetime' : d.drug_exposure_start_datetime,
            'drug_exposure_end_datetime' : d.drug_exposure_end_datetime,
            'visit_occurrence_id' : d.visit_occurrence_id,
        } for d in drug_exposures[offset:limit]]
        pcnt = people.count()
        vcnt = visit_occurrences.count()
        dcnt = drug_exposures.count()
        ccnt = condition_occurrences.count()
        # Page total is driven by the largest of the four result sets.
        max_page = max(pcnt, vcnt, dcnt, ccnt)
        if results:
            return JsonResponse({
                'message': 'SUCCESS',
                "페이지": f'{page} / {ceil(max_page/PAGE_SIZE)}',
                '조회 건수': {
                    # BUGFIX: the old labels (concepts/drugs/conditions) did not
                    # match the counted tables, and the last entry referenced an
                    # undefined name 'cnt', raising NameError on every request
                    # (fixed to ccnt). Keys now mirror the '결과' keys.
                    'people': '{:,}'.format(pcnt) + ' 건',
                    'visit_occurrences': '{:,}'.format(vcnt) + ' 건',
                    'drug_exposures': '{:,}'.format(dcnt) + ' 건',
                    'condition_occurrences': '{:,}'.format(ccnt) + ' 건',
                },
                '결과': results}, status=200)
        return JsonResponse({'message': 'INVALID_REQUEST'}, status=400)
| {"/linewalks/views.py": ["/linewalks/models.py"]} |
45,804 | hyeseong-dev/20210403-assignment | refs/heads/main | /linewalks/urls.py | from django.contrib import admin
from django.urls import path
from linewalks import views
from linewalks import models
# URL routes for the linewalks app: static statistics endpoints plus the
# concept listing and cross-table search APIs (see linewalks/views.py).
urlpatterns = [
    path('static/patients/', views.PatientView.as_view()),  # patient demographics & death counts
    path('static/visitors/', views.VisitView.as_view()) ,  # visit stats by type/gender/age
    path('concept/', views.ConceptListView.as_view()), # queryParameter
    path('search/', views.SearchView.as_view()) ,  # row search across CDM tables
]
| {"/linewalks/views.py": ["/linewalks/models.py"]} |
45,805 | hyeseong-dev/20210403-assignment | refs/heads/main | /config/urls.py | from django.contrib import admin
from django.urls import path,include
urlpatterns = [
path('linewalks/', include('linewalks.urls')),
] | {"/linewalks/views.py": ["/linewalks/models.py"]} |
45,806 | hyeseong-dev/20210403-assignment | refs/heads/main | /linewalks/models.py | from django.db import models
class Concept(models.Model):
    """Vocabulary lookup table (mapped to the pre-existing 'concept' table).

    Field names follow the OMOP CDM 'concept' table layout — presumably
    generated with inspectdb; confirm against the source schema.
    """
    # Primary key supplied by the source vocabulary, not auto-generated.
    concept_id = models.IntegerField(primary_key=True)
    concept_name = models.CharField(max_length=255, blank=True, null=True)
    domain_id = models.CharField(max_length=20, blank=True, null=True)
    vocabulary_id = models.CharField(max_length=20, blank=True, null=True)
    concept_class_id = models.CharField(max_length=20, blank=True, null=True)
    standard_concept = models.CharField(max_length=1, blank=True, null=True)
    concept_code = models.CharField(max_length=50, blank=True, null=True)
    valid_start_date = models.DateField(blank=True, null=True)
    valid_end_date = models.DateField(blank=True, null=True)
    invalid_reason = models.CharField(max_length=1, blank=True, null=True)

    class Meta:
        db_table = 'concept'
class ConditionOccurrence(models.Model):
    """Diagnosis/condition records; one row per condition per visit."""
    # Both FKs are nullable; related_name enables person.condition_occurrence lookups.
    person = models.ForeignKey('linewalks.Person', on_delete=models.CASCADE, blank=True, null=True, related_name='condition_occurrence')
    visit_occurrence = models.ForeignKey('linewalks.VisitOccurrence', on_delete=models.CASCADE, blank=True, null=True,related_name='condition_occurrence')
    condition_occurrence_id = models.BigIntegerField(primary_key=True)
    condition_concept_id = models.IntegerField(blank=True, null=True)
    condition_start_date = models.DateField(blank=True, null=True)
    condition_start_datetime = models.DateTimeField(blank=True, null=True)
    condition_end_date = models.DateField(blank=True, null=True)
    condition_end_datetime = models.DateTimeField(blank=True, null=True)
    condition_type_concept_id = models.IntegerField(blank=True, null=True)
    condition_status_concept_id = models.IntegerField(blank=True, null=True)
    stop_reason = models.CharField(max_length=20, blank=True, null=True)
    provider_id = models.BigIntegerField(blank=True, null=True)
    visit_detail_id = models.BigIntegerField(blank=True, null=True)
    condition_source_value = models.CharField(max_length=50, blank=True, null=True)
    condition_source_concept_id = models.IntegerField(blank=True, null=True)
    condition_status_source_value = models.CharField(max_length=50, blank=True, null=True)

    class Meta:
        db_table = 'condition_occurrence'
class Death(models.Model):
    """Death records; uses Django's implicit auto PK (no natural key in source)."""
    person = models.ForeignKey('linewalks.Person', on_delete=models.CASCADE, blank=True, null=True,related_name='death')
    death_date = models.DateField(blank=True, null=True)
    death_datetime = models.DateTimeField(blank=True, null=True)
    death_type_concept_id = models.IntegerField(blank=True, null=True)
    cause_concept_id = models.BigIntegerField(blank=True, null=True)
    # NOTE(review): cause_source_value is IntegerField here while other
    # *_source_value columns are CharField — confirm against the source schema.
    cause_source_value = models.IntegerField(blank=True, null=True)
    cause_source_concept_id = models.BigIntegerField(blank=True, null=True)

    class Meta:
        db_table = 'death'
class DrugExposure(models.Model):
    """Drug prescription/administration records, linked to a person and a visit."""
    person = models.ForeignKey('linewalks.Person', on_delete=models.CASCADE, blank=True, null=True,related_name='drug_exposure')
    visit_occurrence = models.ForeignKey('linewalks.VisitOccurrence', on_delete=models.CASCADE, blank=True, null=True,related_name='drug_exposure')
    drug_exposure_id = models.BigIntegerField(primary_key=True)
    drug_concept_id = models.IntegerField(blank=True, null=True)
    drug_exposure_start_date = models.DateField(blank=True, null=True)
    drug_exposure_start_datetime = models.DateTimeField(blank=True, null=True)
    drug_exposure_end_date = models.DateField(blank=True, null=True)
    drug_exposure_end_datetime = models.DateTimeField(blank=True, null=True)
    verbatim_end_date = models.DateField(blank=True, null=True)
    drug_type_concept_id = models.IntegerField(blank=True, null=True)
    stop_reason = models.CharField(max_length=20, blank=True, null=True)
    refills = models.IntegerField(blank=True, null=True)
    # NOTE(review): 65535/65535 looks like inspectdb output for an unconstrained
    # PostgreSQL numeric column — confirm before relying on these precision limits.
    quantity = models.DecimalField(max_digits=65535, decimal_places=65535, blank=True, null=True)
    days_supply = models.IntegerField(blank=True, null=True)
    sig = models.TextField(blank=True, null=True)
    route_concept_id = models.IntegerField(blank=True, null=True)
    lot_number = models.CharField(max_length=50, blank=True, null=True)
    provider_id = models.BigIntegerField(blank=True, null=True)
    visit_detail_id = models.BigIntegerField(blank=True, null=True)
    drug_source_value = models.CharField(max_length=50, blank=True, null=True)
    drug_source_concept_id = models.IntegerField(blank=True, null=True)
    route_source_value = models.CharField(max_length=50, blank=True, null=True)
    dose_unit_source_value = models.CharField(max_length=50, blank=True, null=True)

    class Meta:
        db_table = 'drug_exposure'
class Person(models.Model):
    """Patient demographics (mapped to the pre-existing 'person' table)."""
    person_id = models.BigIntegerField(primary_key=True,)
    # Concept-id columns (gender/race/ethnicity) reference the Concept table
    # by value, not by FK — joins are done in application code.
    gender_concept_id = models.IntegerField(blank=True, null=True)
    year_of_birth = models.IntegerField(blank=True, null=True)
    month_of_birth = models.IntegerField(blank=True, null=True)
    day_of_birth = models.IntegerField(blank=True, null=True)
    birth_datetime = models.DateTimeField(blank=True, null=True)
    race_concept_id = models.IntegerField(blank=True, null=True)
    ethnicity_concept_id = models.IntegerField(blank=True, null=True)
    location_id = models.BigIntegerField(blank=True, null=True)
    provider_id = models.BigIntegerField(blank=True, null=True)
    care_site_id = models.BigIntegerField(blank=True, null=True)
    person_source_value = models.CharField(max_length=50, blank=True, null=True)
    gender_source_value = models.CharField(max_length=50, blank=True, null=True)
    gender_source_concept_id = models.IntegerField(blank=True, null=True)
    race_source_value = models.CharField(max_length=50, blank=True, null=True)
    race_source_concept_id = models.IntegerField(blank=True, null=True)
    ethnicity_source_value = models.CharField(max_length=50, blank=True, null=True)
    ethnicity_source_concept_id = models.IntegerField(blank=True, null=True)

    class Meta:
        db_table = 'person'
class VisitOccurrence(models.Model):
    """Hospital visit records; one row per visit, linked to a Person."""
    person = models.ForeignKey('linewalks.Person', on_delete=models.CASCADE, blank=True, null=True, related_name='visit_occurrence')
    visit_occurrence_id = models.BigIntegerField(primary_key=True,)
    # Visit type concept id; views filter on 9201/9202/9203 (입원/외래/응급).
    visit_concept_id = models.IntegerField(blank=True, null=True)
    visit_start_date = models.DateField(blank=True, null=True)
    visit_start_datetime = models.DateTimeField(blank=True, null=True)
    visit_end_date = models.DateField(blank=True, null=True)
    visit_end_datetime = models.DateTimeField(blank=True, null=True)
    visit_type_concept_id = models.IntegerField(blank=True, null=True)
    provider_id = models.BigIntegerField(blank=True, null=True)
    care_site_id = models.BigIntegerField(blank=True, null=True)
    visit_source_value = models.CharField(max_length=50, blank=True, null=True)
    visit_source_concept_id = models.IntegerField(blank=True, null=True)
    admitted_from_concept_id = models.IntegerField(blank=True, null=True)
    admitted_from_source_value = models.CharField(max_length=50, blank=True, null=True)
    discharge_to_source_value = models.CharField(max_length=50, blank=True, null=True)
    discharge_to_concept_id = models.IntegerField(blank=True, null=True)
    preceding_visit_occurrence_id = models.BigIntegerField(blank=True, null=True)

    class Meta:
        db_table = 'visit_occurrence'
| {"/linewalks/views.py": ["/linewalks/models.py"]} |
45,809 | saadatullah/fleet | refs/heads/master | /vehicles/models.py | from __future__ import unicode_literals
from django.db import models
# Create your models here.
class VehMaster(models.Model):
    """Master record for a fleet vehicle, keyed by its registration number."""
    # Natural primary key: the vehicle's registration plate.
    Veh_Reg_No = models.CharField("Registration No", max_length=8, primary_key=True)
    Veh_Make = models.CharField("Vehicle's Make", max_length=15)
    Veh_Model = models.CharField("Vehicle's Model", max_length=15)
    Veh_Year = models.IntegerField("Year Manufactured", blank=True, null=True)
    Veh_Pur_date = models.DateField('Purchase Date', blank=True, null=True)
    Veh_Pur_price = models.IntegerField('Purchase Price', blank=True, null=True)
    Veh_Sale_date = models.DateField('Sales Date', blank=True, null=True)
    Veh_Sale_price = models.IntegerField('Sales Price', blank=True, null=True)
    Veh_Seats = models.IntegerField('No of Seats', blank=True, null=True)
    Veh_Horsepower = models.IntegerField('Power CC', blank=True, null=True)
    Veh_Colour = models.CharField('Colour',max_length=20, blank=True, null=True)
    Veh_Fuel_Type = models.CharField('Fuel Type', max_length=10, blank=True, null=True)
    Veh_Engine_No = models.CharField('Engine No', max_length=20, blank=True, null=True)
    Veh_Chassis_No = models.CharField('Chassis No', max_length=20, blank=True, null=True)
    # Free-text status flag (e.g. active/sold) — NOTE(review): allowed values
    # are not constrained anywhere visible; consider choices=.
    Veh_Status = models.CharField('Status', max_length=10)

    def __str__(self):
        return self.Veh_Reg_No
class VehWorkOrder(models.Model):
    """A maintenance/repair work order raised against a vehicle."""
    Work_Order_No = models.CharField(max_length=25, primary_key=True, blank=False, null=False)
    Work_Reg_No = models.ForeignKey(VehMaster, verbose_name='Registration No', on_delete=models.CASCADE)
    # BUGFIX: verbose_name typo 'Tyep of Work' -> 'Type of Work'
    # (admin display label only; no database change involved).
    Work_Type = models.CharField("Type of Work", max_length=25)
    Work_Invoice_Ref = models.CharField("Work Invoice No", max_length=25)
    Work_Odometer = models.IntegerField("Odometer Reading", blank=True, null=True)
    Work_Man_Name = models.CharField("Expert's Name", max_length=25, blank=True, null=True)
    Work_Details = models.CharField(max_length=200)
    Work_Date = models.DateField('Work Order Date', blank=True, null=True)
    # NOTE(review): max_digits=5 caps the price at 999.99 — confirm this is enough.
    Work_Price = models.DecimalField(max_digits=5, decimal_places=2)
    Work_Remaks = models.CharField("Remarks", max_length=200, blank=True, null=True)

    def __str__(self):
        # Work_Reg_No is a related VehMaster instance, so coerce explicitly.
        return str(self.Work_Reg_No)
class VehWorkInvoice(models.Model):
    """A customer invoice issued for a work order."""
    Inv_Order_No = models.ForeignKey(VehWorkOrder, verbose_name='Work Order No', on_delete=models.CASCADE)
    Inv_No = models.CharField("Invoice No", max_length=25)
    Inv_Date = models.DateField("Invoice Date")
    Inv_Customer_Name = models.CharField("Customer Name", max_length=30)
    Inv_Customer_Address1 = models.CharField("Address Line 1", max_length=40, blank=True, null=True)
    Inv_Customer_Address2 = models.CharField("Address Line 2", max_length=40, blank=True, null=True)
    Inv_Customer_City = models.CharField("City", max_length=30, blank=True, null=True)
    Inv_Customer_Contact = models.CharField("Contact No", max_length=15, blank=True, null=True)
    Inv_Charge_Type = models.CharField('Charge Type', max_length=35, blank=True, null=True)
    Inv_Price = models.DecimalField(max_digits=5, decimal_places=2)
    # BUGFIX: verbose_name typo 'VAT Persentage' -> 'VAT Percentage' (display only).
    Inv_VAT_Persent = models.IntegerField("VAT Percentage", blank=True, null=True)

    def __str__(self):
        # BUGFIX: __str__ must return a str; it previously returned the related
        # VehWorkOrder instance, raising TypeError whenever the object was
        # rendered (e.g. in the admin). Matches VehInvoice.__str__'s str() wrap.
        return str(self.Inv_Order_No)
class VehInvoice(models.Model):
    """A simplified invoice record keyed to a work order.

    NOTE(review): overlaps heavily with VehWorkInvoice — presumably a slimmer
    replacement (see the commented-out admin code); confirm which is canonical.
    """
    Work_Order_No = models.ForeignKey(VehWorkOrder, to_field='Work_Order_No', verbose_name='Registration No', on_delete=models.CASCADE)
    Inv_No = models.CharField("Invoice No", max_length=25)
    Inv_Date = models.DateField("Invoice Date")
    Inv_Customer_Name = models.CharField("Customer Name", max_length=30)
    Inv_Customer_Contact = models.CharField("Contact No", max_length=15, blank=True, null=True)
    Inv_Price = models.DecimalField(max_digits=5, decimal_places=2)

    def __str__(self):
        # FK instance coerced to str (the related object's registration number).
        return str(self.Work_Order_No)
class VehTyres(models.Model):
    """Tyre purchase/fitting record for a vehicle (tread depths per corner)."""
    Tyre_Reg_No = models.ForeignKey(VehMaster, verbose_name='Registration No', on_delete=models.CASCADE)
    Tyre_Tracking = models.CharField("Tyre No", max_length=25)
    Tyre_Date = models.DateField("Tyre Date")
    Tyre_Shop = models.CharField("Tyre Shop", max_length=30)
    # NOTE(review): field name 'Inv_Customer_Contact' looks copy-pasted from the
    # invoice models — presumably the tyre shop's contact number; confirm.
    Inv_Customer_Contact = models.CharField("Contact No", max_length=15, blank=True, null=True)
    Tyre_Price = models.DecimalField(max_digits=5, decimal_places=2)
    # OSF/NSF/OSR/NSR: offside/nearside, front/rear tyre positions.
    Tyre_OSF = models.IntegerField("OSF", blank=True, null=True)
    Tyre_NSF = models.IntegerField("NSF", blank=True, null=True)
    Tyre_OSR = models.IntegerField("OSR", blank=True, null=True)
    Tyre_NSR = models.IntegerField("NSR", blank=True, null=True)

    def __str__(self):
        # BUGFIX: __str__ must return a str; it previously returned the related
        # VehMaster instance, raising TypeError whenever the object was rendered.
        return str(self.Tyre_Reg_No)
# , on_delete=models.CASCADE
| {"/vehicles/admin.py": ["/vehicles/models.py"]} |
45,810 | saadatullah/fleet | refs/heads/master | /vehicles/admin.py | from django.contrib import admin
# Register your models here.
from .models import VehMaster, VehWorkOrder, VehWorkInvoice, VehInvoice, VehTyres
from django.forms import TextInput, Textarea, ModelForm
from django.forms.models import inlineformset_factory
from django.db import models
class VehWorkOrderInline(admin.TabularInline):
    """Inline editor for work orders shown on the VehMaster admin page."""
    # Shrink the default widgets so the tabular inline fits on screen.
    formfield_overrides = {
        models.CharField: {'widget': TextInput(attrs={'size':'23'})},
        models.DecimalField: {'widget': TextInput(attrs={'size':'7'})},
        models.TextField: {'widget': Textarea(attrs={'rows':4, 'cols':25})},
    }
    model = VehWorkOrder
    extra = 1  # one blank extra row for quick entry
class VehMasterAdmin(admin.ModelAdmin):
    """Admin configuration for vehicles, with their work orders edited inline."""
    # NOTE(review): 'padding-left' is not a valid <input> attribute — probably
    # intended as a CSS style; confirm and move to style= if so.
    formfield_overrides = {
        models.CharField: {'widget': TextInput(attrs={'size':'20', 'padding-left':'1px'})},
    }
    # Columns shown on the change-list page.
    list_display = ('Veh_Reg_No', 'Veh_Make', 'Veh_Model', 'Veh_Year', 'Veh_Status',
                    'Veh_Seats', 'Veh_Horsepower',
                    'Veh_Fuel_Type', 'Veh_Pur_date', 'Veh_Pur_price',
                    )
    # Grouped rows on the edit form (fields in one tuple render side by side).
    fields = [('Veh_Reg_No', 'Veh_Make', 'Veh_Model', 'Veh_Status', 'Veh_Year'),
              ('Veh_Engine_No', 'Veh_Chassis_No', 'Veh_Seats', 'Veh_Fuel_Type',
               'Veh_Horsepower', 'Veh_Colour'),
              ('Veh_Sale_price', 'Veh_Pur_price', 'Veh_Sale_date', 'Veh_Pur_date')
              ]
    inlines = [VehWorkOrderInline]
#-------------------------------------------------
class VehWorkInvoiceInline(admin.TabularInline):
    """Inline editor for invoices shown on the VehWorkOrder admin page."""
    # Shrink the default widgets so the tabular inline fits on screen.
    formfield_overrides = {
        models.CharField: {'widget': TextInput(attrs={'size':'23'})},
        models.DecimalField: {'widget': TextInput(attrs={'size':'7'})},
        models.TextField: {'widget': Textarea(attrs={'rows':4, 'cols':25})},
    }
    model = VehWorkInvoice
    extra = 1  # one blank extra row for quick entry
class VehWorkOrderAdmin(admin.ModelAdmin):
    """Admin configuration for work orders, with their invoices edited inline."""
    formfield_overrides = {
        models.CharField: {'widget': TextInput(attrs={'size':'20'})},
        models.DecimalField: {'widget': TextInput(attrs={'size':'7'})},
    }
    # Columns shown on the change-list page.
    # NOTE(review): 'Work_Date' appears twice in this tuple — likely redundant.
    list_display = ('Work_Reg_No', 'Work_Order_No', 'Work_Date', 'Work_Man_Name',
                    'Work_Price', 'Work_Type', 'Work_Invoice_Ref', 'Work_Odometer',
                    'Work_Date','Work_Remaks', 'Work_Details',
                    )
    # Grouped rows on the edit form (fields in one tuple render side by side).
    fields = [('Work_Reg_No', 'Work_Order_No', 'Work_Man_Name'),
              ('Work_Date', 'Work_Invoice_Ref', 'Work_Type'),
              ('Work_Remaks', 'Work_Details', 'Work_Odometer', 'Work_Price' )
              ]
    inlines = [VehWorkInvoiceInline]
#-------------------------------------------------
# NOTE(review): the triple-quoted block below is commented-out (dead) code for a
# VehInvoice inline; it also RE-defines VehWorkOrderAdmin and would shadow the
# active class above if ever un-quoted. Consider deleting it outright.
'''
class VehInvoiceInline(admin.TabularInline):
    formfield_overrides = {
        models.CharField: {'widget': TextInput(attrs={'size':'23'})},
        models.DecimalField: {'widget': TextInput(attrs={'size':'7'})},
        models.TextField: {'widget': Textarea(attrs={'rows':4, 'cols':25})},
    }
    model = VehInvoice
    extra = 1

class VehWorkOrderAdmin(admin.ModelAdmin):
    formfield_overrides = {
        models.CharField: {'widget': TextInput(attrs={'size':'20'})},
        models.DecimalField: {'widget': TextInput(attrs={'size':'7'})},
    }
    list_display = ('Work_Reg_No', 'Work_Order_No', 'Work_Date', 'Work_Man_Name',
                    'Work_Price', 'Work_Type', 'Work_Invoice_Ref', 'Work_Odometer',
                    'Work_Date','Work_Remaks', 'Work_Details',
                    )
    fields = [('Work_Reg_No', 'Work_Order_No', 'Work_Man_Name'),
              ('Work_Date', 'Work_Invoice_Ref', 'Work_Type'),
              ('Work_Remaks', 'Work_Details', 'Work_Odometer', 'Work_Price' )
              ]
    inlines = [VehInvoiceInline]
'''
# Register the configured admins; VehInvoice registration is currently disabled.
admin.site.register(VehMaster, VehMasterAdmin)
admin.site.register(VehWorkOrder, VehWorkOrderAdmin)
#admin.site.register(VehInvoice)
admin.site.register(VehTyres)
# 'Veh_Engine_No', 'Veh_Chassis_No', VehWorkInvoice, VehTyres
| {"/vehicles/admin.py": ["/vehicles/models.py"]} |
45,811 | saadatullah/fleet | refs/heads/master | /vehicles/migrations/0003_vehinvoice.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-01-02 15:47
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add the VehInvoice table, linked to VehWorkOrder by its natural key."""

    dependencies = [
        ('vehicles', '0002_vehworkinvoice'),
    ]

    operations = [
        migrations.CreateModel(
            name='VehInvoice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Inv_No', models.CharField(max_length=25, verbose_name='Invoice No')),
                ('Inv_Date', models.DateField(verbose_name='Invoice Date')),
                ('Inv_Customer_Name', models.CharField(max_length=30, verbose_name='Customer Name')),
                ('Inv_Customer_Contact', models.CharField(blank=True, max_length=15, null=True, verbose_name='Contact No')),
                ('Inv_Price', models.DecimalField(decimal_places=2, max_digits=5)),
                ('Inv_Order_No', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='vehicles.VehWorkOrder', verbose_name='Work Order No')),
            ],
        ),
    ]
| {"/vehicles/admin.py": ["/vehicles/models.py"]} |
45,818 | bhklab/DataIngestion | refs/heads/master | /PharmacoDI/PharmacoDI/build_clinical_trial_tables.py | import os
import requests
import pandas as pd
import datatable as dt
from urllib3.exceptions import HTTPError
from PharmacoDI.get_chembl_drug_targets import parallelize
from PharmacoDI.combine_pset_tables import join_tables, write_table
# TODO: split into more helpers?
def build_clinical_trial_tables(output_dir):
    """
    Build the clinical trial and drug trial tables by querying the
    clinicaltrial.gov API. Queries are made by drug names from the drug
    synonyms table.

    @param output_dir: [`string`] The file path to the directory with all
                        PharmacoDB tables
    @return: None
    """
    # Load drug synonyms (only the id and name columns are needed)
    drug_df = pd.read_csv(
        os.path.join(output_dir, 'drug_synonym.csv'))[['drug_id', 'drug_name']]
    # Query clinicaltrials.gov in parallel batches of 50 drug names
    print('Getting clinical trials from clinicaltrials.gov...')
    study_frames = parallelize(list(drug_df['drug_name']),
                               get_clinical_trials_by_drug_names, 50)
    studies_df = pd.concat(study_frames)
    # The API wraps every field in an array; explode each object column
    # into separate rows (duplicating the index) to flatten them.
    for column in studies_df.dtypes[studies_df.dtypes == 'object'].index.values:
        studies_df = studies_df.explode(column)
    # Drop the API rank and map API field names onto table column names
    studies_df = studies_df.drop(columns='Rank').rename(
        columns={'OrgStudyId': 'clinical_trial_id',
                 'NCTId': 'nct',
                 'SeeAlsoLinkURL': 'link',
                 'OverallStatus': 'status'})
    # Clinical trials table: one row per unique trial id
    clin_trial_df = (studies_df[['clinical_trial_id', 'nct', 'link', 'status']]
                     .drop_duplicates('clinical_trial_id')
                     .reset_index(drop=True))
    write_table(dt.Frame(clin_trial_df), 'clinical_trial',
                output_dir, add_index=False)
    # Drug trial join table: (trial, drug) pairs keyed by drug_id
    drug_trial_df = studies_df[['clinical_trial_id', 'drug_name']].drop_duplicates()
    drug_trial_df = (pd.merge(drug_trial_df, drug_df, on='drug_name')
                     .drop(columns='drug_name'))
    write_table(dt.Frame(drug_trial_df), 'drug_trial',
                output_dir, add_index=False)
# TODO: shorter names please?
def get_clinical_trials_by_drug_names(drug_names):
    """
    Given a list of drug_names, query the clinicaltrial.gov API iteratively
    to get all trials related to these drugs and return these studies in a table.

    @param drug_names: [`list(string)`] A list of (up to 50) drug names
    @return: [`pd.DataFrame`] A table of all studies, including their rank, study ID,
        NCT id, recruitment status, link, and drug name.
    """
    frames = []
    for name in drug_names:
        lower, upper = 1, 1000
        # First page of results for this drug
        studies, n_returned, n_found = get_clinical_trials_for_drug(
            name, lower, upper)
        # Page through the remaining results until everything is retrieved
        while n_found > n_returned:
            lower += 1000
            upper += 1000
            page, page_returned, _ = get_clinical_trials_for_drug(
                name, lower, upper)
            studies = pd.concat([studies, page])
            n_returned += page_returned
        studies['drug_name'] = name
        frames.append(studies)
    return pd.concat(frames)
def get_clinical_trials_for_drug(drug_name, min_rank, max_rank):
    """
    Given a drug_name, query the clinicaltrial.gov API to get all trials
    for this drug between min_rank and max_rank (inclusive). Return the
    studies in a table. If the HTTP request fails or if no studies are
    returned, returns an empty DataFrame.

    @param drug_name: [`string`] A drug name
    @param min_rank: [`int`] The minimum rank of a retrieved study
    @param max_rank: [`int`] The maximum rank of a retrieved study
    @return: [`(pd.DataFrame, int, int)`] A table of retrieved studies
        (rank, study ID, NCT id, recruitment status, link), the number of
        studies returned by this call, and the total number of studies found.
    """
    # Make API call
    base_url = 'https://clinicaltrials.gov/api/query/study_fields'
    params = {
        'expr': drug_name,
        'fields': 'OrgStudyId,NCTId,OverallStatus,SeeAlsoLinkURL',
        'min_rnk': min_rank,
        'max_rnk': max_rank,
        'fmt': 'json'
    }
    r = requests.get(base_url, params=params)
    studies = pd.DataFrame(columns=['Rank', 'OrgStudyId', 'NCTId',
                                    'OverallStatus', 'SeeAlsoLinkURL'])
    # Check that request was successful
    if r.status_code != 200:
        print(f'API call for clinical trials related to {drug_name}',
              ' failed for the following reason:\n', r.json()['Error'])
        return studies, 0, 0
    response = r.json()['StudyFieldsResponse']
    if 'StudyFields' in response:
        studies = pd.DataFrame(response['StudyFields'])
    # BUG FIX: the counts were previously returned as (found, returned),
    # but the caller unpacks (studies, returned, found), so its pagination
    # condition `found > returned` could never become True and results past
    # the first 1000 were silently dropped. Return (returned, found).
    return studies, response['NStudiesReturned'], response['NStudiesFound']
| {"/PharmacoDI/PharmacoDI/__init__.py": ["/PharmacoDI/PharmacoDI/download_psets.py", "/PharmacoDI/PharmacoDI/download_canonical_psets.py", "/PharmacoDI/PharmacoDI/download_gene_signatures.py", "/PharmacoDI/PharmacoDI/get_gene_targets.py", "/PharmacoDI/PharmacoDI/get_target_annotations.py", "/PharmacoDI/PharmacoDI/read_pset.py", "/PharmacoDI/PharmacoDI/build_all_pset_tables.py", "/PharmacoDI/PharmacoDI/get_chembl_targets.py", "/PharmacoDI/PharmacoDI/get_chembl_drug_targets.py", "/PharmacoDI/PharmacoDI/build_target_tables.py", "/PharmacoDI/PharmacoDI/combine_pset_tables.py", "/PharmacoDI/PharmacoDI/build_synonym_tables.py", "/PharmacoDI/PharmacoDI/build_cellosaurus.py", "/PharmacoDI/PharmacoDI/build_clinical_trial_tables.py"]} |
45,819 | bhklab/DataIngestion | refs/heads/master | /PharmacoDI/PharmacoDI/build_dataset_join_tables.py | import pandas as pd
from PharmacoDI.build_all_pset_tables import build_cell_df, build_drug_df, build_tissue_df
def build_dataset_join_dfs(pset_dict, pset_name, primary_dfs=None):
    """
    Builds join tables summarizing the cell lines, tissues, and compounds
    in this PSet.

    @param pset_dict: [`dict`] A nested dictionary containing all tables in the PSet
    @param pset_name: [`string`] The name of the PSet
    @param primary_dfs: [`dict(string: pd.DataFrame)`] Optional dictionary of
        primary tables (cell, tissue, drug) already built for this PSet; any
        table not provided is rebuilt from pset_dict by the helpers.
    @return: [`dict(string: pd.DataFrame)`] A dictionary of the join tables, with
        table names as keys
    """
    # BUG FIX: the default used to be a mutable dict literal ({}), which is
    # shared across all calls; use None as the sentinel instead.
    if primary_dfs is None:
        primary_dfs = {}
    # .get returns None for missing keys, matching the old conditional logic
    cell_df = primary_dfs.get('cell')
    tissue_df = primary_dfs.get('tissue')
    compound_df = primary_dfs.get('drug')
    join_dfs = {}
    join_dfs['dataset_cell'] = build_dataset_cell_df(
        pset_dict, pset_name, cell_df)
    join_dfs['dataset_tissue'] = build_dataset_tissue_df(
        pset_dict, pset_name, tissue_df)
    join_dfs['dataset_compound'] = build_dataset_compound_df(
        pset_dict, pset_name, compound_df)
    return join_dfs
def build_dataset_cell_df(pset_dict, pset_name, cell_df=None):
    """
    Build a join table listing every cell line in this PSet.

    @param pset_dict: [`dict`] A nested dictionary containing all tables in the PSet
    @param pset_name: [`string`] The name of the PSet
    @param cell_df: [`pd.DataFrame`] The cell table for this PSet; rebuilt
        from pset_dict when not provided
    @return: [`pd.DataFrame`] Join table with 'dataset_id' and 'cell_id' columns
    """
    if cell_df is None:
        cell_df = build_cell_df(pset_dict)
    # One row per cell line, all tagged with this dataset's name
    return pd.DataFrame({'dataset_id': pset_name, 'cell_id': cell_df['name']})
def build_dataset_tissue_df(pset_dict, pset_name, tissue_df=None):
    """
    Build a join table listing every tissue in this PSet.

    @param pset_dict: [`dict`] A nested dictionary containing all tables in the PSet
    @param pset_name: [`string`] The name of the PSet
    @param tissue_df: [`pd.DataFrame`] The tissue table for this PSet; rebuilt
        from pset_dict when not provided
    @return: [`pd.DataFrame`] Join table with 'dataset_id' and 'tissue_id' columns
    """
    if tissue_df is None:
        tissue_df = build_tissue_df(pset_dict)
    # One row per tissue, all tagged with this dataset's name
    return pd.DataFrame({'dataset_id': pset_name, 'tissue_id': tissue_df})
def build_dataset_compound_df(pset_dict, pset_name, compound_df=None):
    """
    Build a join table listing every drug/compound in this PSet.

    @param pset_dict: [`dict`] A nested dictionary containing all tables in the PSet
    @param pset_name: [`string`] The name of the PSet
    @param compound_df: [`pd.DataFrame`] The drug/compound table for this PSet;
        rebuilt from pset_dict when not provided
    @return: [`pd.DataFrame`] Join table with 'dataset_id' and 'compound_id' columns
    """
    if compound_df is None:
        compound_df = build_drug_df(pset_dict)
    # One row per compound, all tagged with this dataset's name
    return pd.DataFrame({'dataset_id': pset_name, 'compound_id': compound_df})
| {"/PharmacoDI/PharmacoDI/__init__.py": ["/PharmacoDI/PharmacoDI/download_psets.py", "/PharmacoDI/PharmacoDI/download_canonical_psets.py", "/PharmacoDI/PharmacoDI/download_gene_signatures.py", "/PharmacoDI/PharmacoDI/get_gene_targets.py", "/PharmacoDI/PharmacoDI/get_target_annotations.py", "/PharmacoDI/PharmacoDI/read_pset.py", "/PharmacoDI/PharmacoDI/build_all_pset_tables.py", "/PharmacoDI/PharmacoDI/get_chembl_targets.py", "/PharmacoDI/PharmacoDI/get_chembl_drug_targets.py", "/PharmacoDI/PharmacoDI/build_target_tables.py", "/PharmacoDI/PharmacoDI/combine_pset_tables.py", "/PharmacoDI/PharmacoDI/build_synonym_tables.py", "/PharmacoDI/PharmacoDI/build_cellosaurus.py", "/PharmacoDI/PharmacoDI/build_clinical_trial_tables.py"]} |
45,820 | bhklab/DataIngestion | refs/heads/master | /PharmacoDI/PharmacoDI/get_gene_targets.py | import pandas as pd
from bs4 import BeautifulSoup
import time
from selenium import webdriver
import requests
ensembl_gene_id = 'ENSG00000140465'
def get_gene_targets(ensembl_gene_id):
    """
    Query GeneCards, NCBI, and Ensembl to fetch pages describing a target gene.

    :param ensembl_gene_id: [string] The Ensembl gene id for the desired target gene
    :return: [dict(string: BeautifulSoup | None)] Parsed HTML per query name,
        or None for any query that did not return HTTP 200.
    """
    gene_id = ensembl_gene_id.upper()
    # BUG FIX: the query dict previously had a stray trailing URL after the
    # closing brace, turning `queries` into a (dict, str) tuple and making the
    # later `.items()` call raise AttributeError. The Ensembl URL is now a
    # proper dict entry.
    queries = {
        'genecards_query':
            f"https://www.genecards.org/cgi-bin/carddisp.pl?id={gene_id}&idtype=ensembl",
        'ncbi_query': f"https://www.ncbi.nlm.nih.gov/gene/?term={gene_id}",
        'ensembl_query':
            f"http://useast.ensembl.org/Homo_sapiens/Gene/Summary?g={gene_id}",
    }
    # NOTE(review): an unused requests.Session with a raise_for_status hook was
    # removed — it was never used for the actual GETs below.
    api_requests = {key: requests.get(query) for key, query in queries.items()}
    # Retry once for any query rejected with 403 (anti-scraping measures).
    # BUG FIX: status codes are ints, not strings; the old code compared
    # against "403" and indexed dict views with a boolean list.
    for key, response in api_requests.items():
        if response.status_code == 403:
            api_requests[key] = requests.get(queries[key])
    parsed_annotation_data = \
        {key: BeautifulSoup(response.text, 'html.parser') if response.status_code == 200 else None
         for key, response in api_requests.items()}
    # BUG FIX: the parsed data was previously computed but never returned.
    return parsed_annotation_data
def scrape_genecards(ensembl_gene_id):
    """
    Use a headless Firefox browser to make get requests to Genecards despite
    their anti-scraping software.

    WARNING: Requires Firefox and geckodriver be installed to work!

    :param ensembl_gene_id: [string] The ENSEMBL id for the gene you want to query
    :return: [list(DataFrame)] tables parsed from the gene card page
        (pd.read_html returns a list of DataFrames)
    """
    # Configure Firefox options (headless flags currently disabled)
    options = webdriver.FirefoxOptions()
    #options.add_argument("-disable-extensions")
    #options.add_argument("-disable-gpu")
    #options.add_argument("-no-sandbox")
    #options.add_argument("-headless")
    driver = webdriver.Firefox(options=options)
    # Build the HTTP query and use the browser to get it
    ensembl_query = f"https://www.genecards.org/cgi-bin/carddisp.pl?id={ensembl_gene_id.upper()}&idtype=ensembl"
    driver.get(ensembl_query)
    # Expand every collapsed table so all rows appear in the page source
    expand_table_elements = driver.find_elements_by_xpath("//a[@data-role='show-all']")
    for element in expand_table_elements:
        time.sleep(1)  # give the page time to settle between clicks
        element.click()
    page_html = driver.page_source
    # BUG FIX: `del driver` only dropped the Python reference and leaked the
    # browser process; quit() shuts the browser down properly.
    driver.quit()
    # Parse the page HTML to DataFrames
    parsed_html = BeautifulSoup(page_html, 'html.parser')
    genecards_dfs = pd.read_html(str(parsed_html.find_all('table')))
    # BUG FIX: the result was previously computed but never returned, despite
    # the docstring promising a return value.
    return genecards_dfs
| {"/PharmacoDI/PharmacoDI/__init__.py": ["/PharmacoDI/PharmacoDI/download_psets.py", "/PharmacoDI/PharmacoDI/download_canonical_psets.py", "/PharmacoDI/PharmacoDI/download_gene_signatures.py", "/PharmacoDI/PharmacoDI/get_gene_targets.py", "/PharmacoDI/PharmacoDI/get_target_annotations.py", "/PharmacoDI/PharmacoDI/read_pset.py", "/PharmacoDI/PharmacoDI/build_all_pset_tables.py", "/PharmacoDI/PharmacoDI/get_chembl_targets.py", "/PharmacoDI/PharmacoDI/get_chembl_drug_targets.py", "/PharmacoDI/PharmacoDI/build_target_tables.py", "/PharmacoDI/PharmacoDI/combine_pset_tables.py", "/PharmacoDI/PharmacoDI/build_synonym_tables.py", "/PharmacoDI/PharmacoDI/build_cellosaurus.py", "/PharmacoDI/PharmacoDI/build_clinical_trial_tables.py"]} |
45,821 | bhklab/DataIngestion | refs/heads/master | /PharmacoDI/setup.py | from setuptools import setup, find_packages
# Packaging metadata for the PharmacoDI distribution: tools for processing
# R PharmacoSet objects into .csv files of PharmacoDB database tables.
setup(name='PharmacoDI',
      version='0.0.1',
      description="Tools for processing R PharmacoSet objects into .csv files of PharmacoDB database tables.",
      url='https://github.com/bhklab/DataIngestion/tree/master/PharmacoDI',
      # Runtime dependencies; bs4/selenium/lxml support the web-scraping helpers
      install_requires=[
          'dask[dataframe]',
          'swifter',
          'datatable',
          'pandas',
          'chembl_webresource_client',
          'wget',
          'bs4',
          'selenium',
          'lxml'
      ],
      author='Evgeniya Gorobets, Christopher Eeles, Benjamin Haibe-Kains',
      author_email='christopher.eeles@uhnresearch.ca, benjamin.haibe.kains@utoronto.ca',
      license='MIT',
      packages=find_packages(),
      zip_safe=False
      )
45,822 | bhklab/DataIngestion | refs/heads/master | /PharmacoDI/PharmacoDI/get_chembl_targets.py | from chembl_webresource_client.new_client import new_client
import pandas as pd
import os
def get_chembl_targets(target_file):
    """
    Get all ChEMBL targets in humans and write them to a table.

    :target_file: full file path to where the target table should be written
    :return: a DataFrame containing all targets from ChEMBL
    """
    print('Getting all targets from ChEMBL...')
    # Connect to the ChEMBL target endpoint and pull all human targets
    target = new_client.target
    target_df = pd.DataFrame(list(target.filter(organism__in=['Homo sapiens'])))
    # Explode list-like columns into separate rows, duplicating the index
    # (every field comes back wrapped in an array)
    for column in target_df.dtypes[target_df.dtypes == 'object'].index.values:
        target_df = target_df.explode(column)
    # Drop any targets without cross refs
    target_df = target_df.query("cross_references.notna()").copy()
    # Expand dict-valued columns into one column per key
    for col in ['cross_references', 'target_components']:
        expanded = pd.json_normalize(target_df[col], max_level=0)
        expanded.index = target_df.index
        target_df = pd.merge(target_df.drop(columns=col), expanded,
                             left_index=True, right_index=True)
    # Drop target component cols with dicts (for now; TODO: keep them and also expand them?)
    target_df = target_df.drop(columns=['target_component_synonyms',
                                        'target_component_xrefs'])
    target_df = target_df.drop_duplicates()
    target_df.to_csv(target_file)
    return target_df
| {"/PharmacoDI/PharmacoDI/__init__.py": ["/PharmacoDI/PharmacoDI/download_psets.py", "/PharmacoDI/PharmacoDI/download_canonical_psets.py", "/PharmacoDI/PharmacoDI/download_gene_signatures.py", "/PharmacoDI/PharmacoDI/get_gene_targets.py", "/PharmacoDI/PharmacoDI/get_target_annotations.py", "/PharmacoDI/PharmacoDI/read_pset.py", "/PharmacoDI/PharmacoDI/build_all_pset_tables.py", "/PharmacoDI/PharmacoDI/get_chembl_targets.py", "/PharmacoDI/PharmacoDI/get_chembl_drug_targets.py", "/PharmacoDI/PharmacoDI/build_target_tables.py", "/PharmacoDI/PharmacoDI/combine_pset_tables.py", "/PharmacoDI/PharmacoDI/build_synonym_tables.py", "/PharmacoDI/PharmacoDI/build_cellosaurus.py", "/PharmacoDI/PharmacoDI/build_clinical_trial_tables.py"]} |
45,823 | bhklab/DataIngestion | refs/heads/master | /PharmacoDI/PharmacoDI/build_pset_gene_drugs.py | import os
import glob
import pandas as pd
import numpy as np
def read_gene_signatures(pset_name, file_path):
    """
    Read all gene signatures for a PSet (to be used in gene_drugs table)
    from the directory file_path.

    @param pset_name: [`string`] The name of the PSet
    @param file_path: [`string`] The directory that holds all gene signature files
    @return: [`DataFrame`] A dataframe containing all gene signatures for the PSet
    @raise ValueError: when no matching gene signature CSV exists
    """
    # Locate the PSet's gene signature CSV file
    csv_pattern = f'{os.path.join(file_path, pset_name, pset_name)}_gene_sig.csv'
    matches = glob.glob(csv_pattern)
    if not matches:
        raise ValueError(
            f'No PSet gene signatures file named {pset_name} could be found in {file_path}')
    return pd.read_csv(matches[0])
def build_gene_drug_df(gene_sig_dir, pset_name):
    """
    Build the gene_drugs table for a PSet from its gene signature file.

    @param gene_sig_dir: [`string`] The file path to the directory containing the gene
        signatures for each PSet
    @param pset_name: [`string`] The name of the PSet
    @return: [`DataFrame`] The gene_drugs table for this PSet, or None when no
        gene signature file exists
    """
    # If gene signature file doesn't exist, warn and bail out
    if not os.path.exists(os.path.join(gene_sig_dir, pset_name)):
        print(
            f'WARNING: gene signature annotations file does not exist for {pset_name} in {gene_sig_dir}')
        return None
    gene_sig_df = read_gene_signatures(pset_name, gene_sig_dir)
    # Keep only the columns relevant to the gene_drugs table
    gene_drug_df = gene_sig_df[[
        'gene', 'drug', 'estimate', 'n', 'pvalue', 'df', 'fdr', 'tissue', 'mDataType']].copy()
    # 'se' will be added to future gene signature runs; NA for now.
    # (Significance could later be derived from fdr at alpha = 0.05.)
    gene_drug_df['se'] = np.nan
    # The sensitivity statistic is always AAC (area above dose-response curve) for now
    gene_drug_df['sens_stat'] = 'AAC'
    # Clinical-trial flags (source: clinicaltrials.gov) are not populated yet
    gene_drug_df['tested_in_human_trials'] = np.nan
    gene_drug_df['in_clinical_trials'] = np.nan
    # Rename foreign key columns and tag rows with the dataset
    gene_drug_df = gene_drug_df.rename(
        columns={'gene': 'gene_id', 'drug': 'drug_id', 'tissue': 'tissue_id'})
    gene_drug_df['dataset_id'] = pset_name
    # Statistics not yet computed for any PSet (TODO - get this data)
    for missing_col in ('tstat', 'fstat', 'FWER_genes', 'FWER_drugs',
                        'FWER_all', 'BF_p_all', 'meta_res'):
        gene_drug_df[missing_col] = np.nan
    # Return columns in the final table order
    return gene_drug_df[['gene_id', 'drug_id', 'estimate', 'se', 'n', 'tstat', 'fstat',
                         'pvalue', 'df', 'fdr', 'FWER_genes', 'FWER_drugs', 'FWER_all',
                         'BF_p_all', 'meta_res', 'dataset_id', 'sens_stat', 'tissue_id',
                         'mDataType', 'tested_in_human_trials', 'in_clinical_trials']]
| {"/PharmacoDI/PharmacoDI/__init__.py": ["/PharmacoDI/PharmacoDI/download_psets.py", "/PharmacoDI/PharmacoDI/download_canonical_psets.py", "/PharmacoDI/PharmacoDI/download_gene_signatures.py", "/PharmacoDI/PharmacoDI/get_gene_targets.py", "/PharmacoDI/PharmacoDI/get_target_annotations.py", "/PharmacoDI/PharmacoDI/read_pset.py", "/PharmacoDI/PharmacoDI/build_all_pset_tables.py", "/PharmacoDI/PharmacoDI/get_chembl_targets.py", "/PharmacoDI/PharmacoDI/get_chembl_drug_targets.py", "/PharmacoDI/PharmacoDI/build_target_tables.py", "/PharmacoDI/PharmacoDI/combine_pset_tables.py", "/PharmacoDI/PharmacoDI/build_synonym_tables.py", "/PharmacoDI/PharmacoDI/build_cellosaurus.py", "/PharmacoDI/PharmacoDI/build_clinical_trial_tables.py"]} |
45,824 | bhklab/DataIngestion | refs/heads/master | /PharmacoDI/PharmacoDI/write_pset_table.py | import os
from datatable import Frame
def write_pset_table(pset_df, df_name, pset_name, df_dir):
    """
    Write a PSet table to a CSV file.

    @param pset_df: [`DataFrame`] A PSet DataFrame
    @param df_name: [`string`] The name of the table (used in the file name)
    @param pset_name: [`string`] The name of the PSet
    @param df_dir: [`string`] The name of the directory to hold all the PSet tables
    @return [`None`]
    """
    pset_path = os.path.join(df_dir, pset_name)
    # Make sure directory for this PSet exists.
    # BUG FIX: os.mkdir failed when df_dir itself was missing and raised
    # FileExistsError on a race; makedirs with exist_ok handles both.
    os.makedirs(pset_path, exist_ok=True)
    # Convert to datatable Frame for fast write to disk
    pset_df = Frame(pset_df)
    print(f'Writing {df_name} table to {pset_path}...')
    # Use datatable to convert df to csv
    pset_df.to_csv(os.path.join(pset_path, f'{pset_name}_{df_name}.csv'))
| {"/PharmacoDI/PharmacoDI/__init__.py": ["/PharmacoDI/PharmacoDI/download_psets.py", "/PharmacoDI/PharmacoDI/download_canonical_psets.py", "/PharmacoDI/PharmacoDI/download_gene_signatures.py", "/PharmacoDI/PharmacoDI/get_gene_targets.py", "/PharmacoDI/PharmacoDI/get_target_annotations.py", "/PharmacoDI/PharmacoDI/read_pset.py", "/PharmacoDI/PharmacoDI/build_all_pset_tables.py", "/PharmacoDI/PharmacoDI/get_chembl_targets.py", "/PharmacoDI/PharmacoDI/get_chembl_drug_targets.py", "/PharmacoDI/PharmacoDI/build_target_tables.py", "/PharmacoDI/PharmacoDI/combine_pset_tables.py", "/PharmacoDI/PharmacoDI/build_synonym_tables.py", "/PharmacoDI/PharmacoDI/build_cellosaurus.py", "/PharmacoDI/PharmacoDI/build_clinical_trial_tables.py"]} |
45,825 | bhklab/DataIngestion | refs/heads/master | /PharmacoDI/PharmacoDI/__init__.py | from .download_psets import *
from .download_canonical_psets import *
from .download_gene_signatures import *
from .get_gene_targets import get_gene_targets
from .get_target_annotations import get_target_annotations, query_uniprot_mapping_api
from .read_pset import pset_df_to_nested_dict, read_pset_file, read_pset
from .build_all_pset_tables import build_all_pset_tables
from .get_chembl_targets import get_chembl_targets
from .get_chembl_drug_targets import get_chembl_drug_target_mappings
from .build_target_tables import build_target_tables
from .combine_pset_tables import combine_all_pset_tables
from .build_synonym_tables import build_cell_synonym_df, build_drug_synonym_df, build_tissue_synonym_df
from .build_cellosaurus import build_cellosaurus_df, cellosaurus_path
from .build_clinical_trial_tables import build_clinical_trial_tables | {"/PharmacoDI/PharmacoDI/__init__.py": ["/PharmacoDI/PharmacoDI/download_psets.py", "/PharmacoDI/PharmacoDI/download_canonical_psets.py", "/PharmacoDI/PharmacoDI/download_gene_signatures.py", "/PharmacoDI/PharmacoDI/get_gene_targets.py", "/PharmacoDI/PharmacoDI/get_target_annotations.py", "/PharmacoDI/PharmacoDI/read_pset.py", "/PharmacoDI/PharmacoDI/build_all_pset_tables.py", "/PharmacoDI/PharmacoDI/get_chembl_targets.py", "/PharmacoDI/PharmacoDI/get_chembl_drug_targets.py", "/PharmacoDI/PharmacoDI/build_target_tables.py", "/PharmacoDI/PharmacoDI/combine_pset_tables.py", "/PharmacoDI/PharmacoDI/build_synonym_tables.py", "/PharmacoDI/PharmacoDI/build_cellosaurus.py", "/PharmacoDI/PharmacoDI/build_clinical_trial_tables.py"]} |
45,826 | bhklab/DataIngestion | refs/heads/master | /PharmacoDI/PharmacoDI/build_synonym_tables.py | import os
import re
import glob
import numpy as np
import pandas as pd
from datatable import Frame
from PharmacoDI.combine_pset_tables import write_table
# Default paths used when running this module directly
output_dir = os.path.join('data', 'demo')
metadata_dir = os.path.join('data', 'metadata')
cell_file = "cell_annotation_all.csv"
# NOTE(review): tissue synonyms are read from the *cell* annotation file —
# presumably intentional (tissue ids live in the cell metadata); confirm.
tissue_file = "cell_annotation_all.csv"
drug_file = "drugs_with_ids.csv"
def get_metadata(file_name, metadata_dir):
    """
    Load a metadata annotations CSV from metadata_dir.

    @param file_name: [`string`] Name (or glob pattern) of the metadata CSV
    @param metadata_dir: [`string`] Directory containing metadata files
    @return: [`pd.DataFrame`] The annotations table, first column as index
    @raise ValueError: when no matching file exists
    """
    matches = glob.glob(os.path.join(metadata_dir, file_name))
    if not matches:
        raise ValueError(
            f'No metadata file named {file_name} could be found in {metadata_dir}')
    return pd.read_csv(matches[0], index_col=[0])
# --- SYNONYMS TABLES --------------------------------------------------------------------------
def build_cell_synonym_df(cell_file, metadata_dir, output_dir):
    """Build the cell synonym table from annotation metadata and write it to disk."""
    # Load the metadata annotations and the previously-built cell table
    cell_metadata = get_metadata(cell_file, metadata_dir)
    cell_df = pd.read_csv(os.path.join(output_dir, 'cell.csv'))
    # Keep only metadata columns whose names mention 'cellid'
    cellid_pattern = re.compile('cellid')
    relevant_cols = [c for c in cell_metadata.columns if cellid_pattern.search(c)]
    cell_columns = cell_metadata[relevant_cols]
    # Collect unique synonyms and attach cell primary keys
    cell_synonym_df = melt_and_join(cell_columns, 'unique.cellid', cell_df)
    cell_synonym_df = cell_synonym_df.rename(
        columns={'id': 'cell_id', 'value': 'cell_name'})
    # dataset_id is not tracked yet (TODO)
    cell_synonym_df['dataset_id'] = np.nan
    # datatable.Frame gives a fast write to disk
    frame = Frame(cell_synonym_df)
    frame = write_table(frame, 'cell_synonym', output_dir)
    return frame
def build_tissue_synonym_df(tissue_file, metadata_dir, output_dir):
    """Build the tissue synonym table from annotation metadata and write it to disk."""
    # Load the metadata annotations and the previously-built tissue table
    # (the tissue table is assumed to live in output_dir as well)
    tissue_metadata = get_metadata(tissue_file, metadata_dir)
    tissue_df = pd.read_csv(os.path.join(output_dir, 'tissue.csv'))
    # Keep only metadata columns whose names mention 'tissueid'
    tissueid_pattern = re.compile('tissueid')
    relevant_cols = [c for c in tissue_metadata.columns if tissueid_pattern.search(c)]
    tissue_cols = tissue_metadata[relevant_cols]
    # Collect unique synonyms and attach tissue primary keys
    tissue_synonym_df = melt_and_join(tissue_cols, 'unique.tissueid', tissue_df)
    tissue_synonym_df = tissue_synonym_df.rename(
        columns={'id': 'tissue_id', 'value': 'tissue_name'})
    # dataset_id is not tracked yet (TODO)
    tissue_synonym_df['dataset_id'] = np.nan
    # datatable.Frame gives a fast write to disk
    frame = Frame(tissue_synonym_df)
    frame = write_table(frame, 'tissue_synonym', output_dir)
    return frame
def build_drug_synonym_df(drug_file, metadata_dir, output_dir):
    """Build the drug synonym table from annotation metadata and write it to disk."""
    # Load the metadata annotations and the previously-built drug table
    drug_metadata = get_metadata(drug_file, metadata_dir)
    drug_df = pd.read_csv(os.path.join(output_dir, 'drug.csv'))
    # Keep only metadata columns whose names mention 'drugid'
    # (right now only the FDA col is dropped, but there may be more metadata later)
    drugid_pattern = re.compile('drugid')
    relevant_cols = [c for c in drug_metadata.columns if drugid_pattern.search(c)]
    drug_cols = drug_metadata[relevant_cols]
    # Collect unique synonyms and attach drug primary keys
    drug_synonym_df = melt_and_join(drug_cols, 'unique.drugid', drug_df)
    drug_synonym_df = drug_synonym_df.rename(
        columns={'id': 'drug_id', 'value': 'drug_name'})
    # dataset_id is not tracked yet (TODO)
    drug_synonym_df['dataset_id'] = np.nan
    # datatable.Frame gives a fast write to disk
    frame = Frame(drug_synonym_df)
    frame = write_table(frame, 'drug_synonym', output_dir)
    return frame
# Helper function for getting all synonyms related to a certain df
def melt_and_join(meta_df, unique_id, join_df):
    """
    Collect all synonyms from a wide metadata table and attach primary keys.

    @param meta_df: [`DataFrame`] A wide table of synonyms: one unique-ID column
        plus one column per synonym source
    @param unique_id: [`string`] The name of the unique-ID column in meta_df
    @param join_df: [`DataFrame`] Table with 'id' and 'name' columns supplying
        the primary keys to use as foreign keys
    @return: [`DataFrame`] One row per unique synonym, with integer 'id' and
        string 'value' columns
    """
    # Wide -> long: one row per (unique id, synonym); keep only those two
    # columns and collapse duplicates
    long_df = pd.melt(meta_df, id_vars=[unique_id])
    synonyms = long_df[[unique_id, 'value']].drop_duplicates()
    # Discard rows with no synonym value
    synonyms = synonyms[synonyms['value'].notnull()]
    # Attach primary keys by matching the unique id against join_df['name']
    merged = pd.merge(synonyms, join_df, left_on=unique_id,
                      right_on='name', how='inner')[['id', 'value']]
    merged['id'] = merged['id'].astype('int')
    return merged
| {"/PharmacoDI/PharmacoDI/__init__.py": ["/PharmacoDI/PharmacoDI/download_psets.py", "/PharmacoDI/PharmacoDI/download_canonical_psets.py", "/PharmacoDI/PharmacoDI/download_gene_signatures.py", "/PharmacoDI/PharmacoDI/get_gene_targets.py", "/PharmacoDI/PharmacoDI/get_target_annotations.py", "/PharmacoDI/PharmacoDI/read_pset.py", "/PharmacoDI/PharmacoDI/build_all_pset_tables.py", "/PharmacoDI/PharmacoDI/get_chembl_targets.py", "/PharmacoDI/PharmacoDI/get_chembl_drug_targets.py", "/PharmacoDI/PharmacoDI/build_target_tables.py", "/PharmacoDI/PharmacoDI/combine_pset_tables.py", "/PharmacoDI/PharmacoDI/build_synonym_tables.py", "/PharmacoDI/PharmacoDI/build_cellosaurus.py", "/PharmacoDI/PharmacoDI/build_clinical_trial_tables.py"]} |
45,827 | bhklab/DataIngestion | refs/heads/master | /PharmacoDI/PharmacoDI/build_cellosaurus.py | import pandas as pd
import os
from multiprocessing import Pool, cpu_count
from collections import defaultdict
import datatable as dt
from PharmacoDI.combine_pset_tables import join_tables, write_table
# NOTE(review): module-level side effect — changes the process working
# directory on import so the relative path below resolves; confirm this is
# safe when the package is imported from other entry points.
if 'PharmacoDI' not in os.getcwd():
    os.chdir('PharmacoDI')
# Relative path to the raw Cellosaurus dump parsed by build_cellosaurus_df
cellosaurus_path = 'data/metadata/cellosaurus.txt'
# Using a default dict because it allows me to append duplicate indexes into a list
# Helper for build_cellosaurus_df
def build_defaultdict(tuple_list):
    """
    Group (key, value) tuples into a defaultdict mapping each key to the
    list of its values, preserving encounter order. The defaultdict allows
    duplicate keys to accumulate into one list.
    """
    grouped = defaultdict(list)
    for key, value in tuple_list:
        grouped[key].append(value)
    return grouped
# Only 1666 rows; try joining with cell synonym df instead ? (TODO)
def build_cellosaurus_df(cellosaurus_path, output_dir, cell_df):
    """
    Build cellosaurus table.
    @param cellosaurus_path: [`string`] Full file path to cellosaurus file
    @param output_dir: [`string`] The directory to write the cellosaurus table
    @param cell_df: [`datatable.Frame`] The cell table; should be renamed, keyed,
    and shouldn't have 'tissue_id' column
    @return: [`datatable.Frame`] The cellosaurus table
    """
    # Read the raw dump; the first 55 lines are a header block that is skipped
    with open(cellosaurus_path) as f:
        file = [line for line in f]
    file = file[55:]
    # Records are separated by '//' lines; split each record into
    # (two-letter tag, value) pairs based on the tag/value separator
    entries = ''.join(file).split('//\n')
    entry_list = [entry.split('\n') for entry in entries]
    entry_split_list = [[item.split('   ')
                         for item in entry] for entry in entry_list]
    entry_tuple_list = [[(item[0], item[1]) for item in entry if len(
        item) > 1] for entry in entry_split_list]
    # Group each record's tag/value pairs in parallel; a defaultdict keeps
    # repeated tags (e.g. multiple synonyms) as lists
    pool = Pool(cpu_count() - 1)
    dict_list = pool.map(build_defaultdict, entry_tuple_list)
    dict_list = [dict(item) for item in dict_list]
    # Collapse multi-valued tags into one '|||'-delimited string per tag
    dict_list = [{key: '|||'.join(value)
                  for key, value in dct.items()} for dct in dict_list]
    cellosaurus_df = pd.DataFrame(dict_list)
    cellosaurus_df.dropna(axis=1, how='all', inplace=True)
    # Always close your pool or you will have a bunch of processes doing nothing
    pool.close()
    # Drop AG and DT columns (age of donor, date)
    cellosaurus_df.drop(columns=['AG', 'DT'], inplace=True)
    # Rename cols and add cell_id column
    rename_dict = {col: col.lower() for col in cellosaurus_df.columns}
    cellosaurus_df.rename(columns=rename_dict, inplace=True)
    cellosaurus_df.rename(
        columns={'id': 'identifier', 'ac': 'accession'}, inplace=True)
    cellosaurus_df['cell_id'] = cellosaurus_df['identifier']
    # Convert to datatable and join with cell_df; keep only rows that matched
    # a cell (cell_id became a numeric key >= 1 after the join)
    df = join_tables(dt.Frame(cellosaurus_df), cell_df, 'cell_id')
    df = df[dt.f.cell_id >= 1, :]
    df = df[:, ['cell_id', 'identifier', 'accession', 'as', 'sy',
                'dr', 'rx', 'ww', 'cc', 'st', 'di', 'ox', 'hi', 'oi', 'sx', 'ca']]
    df = write_table(df, 'cellosaurus', output_dir)
    return df
#NOTE: These don't map:
"""
>>> cell_df[~cell_df['id'].isin(ids)]
id name tissue_id
141 142 BT179 19
221 222 COLO_005 19
222 223 COLO_011 19
223 224 COLO_021 19
481 482 HCC812 7
742 743 KRIJ 19
960 961 NCE G-28T 8 This one should map to CVCL_0V15
1183 1184 OESO_009 19
1184 1185 OESO_040 19
1237 1238 PD1503a 19
"""
| {"/PharmacoDI/PharmacoDI/__init__.py": ["/PharmacoDI/PharmacoDI/download_psets.py", "/PharmacoDI/PharmacoDI/download_canonical_psets.py", "/PharmacoDI/PharmacoDI/download_gene_signatures.py", "/PharmacoDI/PharmacoDI/get_gene_targets.py", "/PharmacoDI/PharmacoDI/get_target_annotations.py", "/PharmacoDI/PharmacoDI/read_pset.py", "/PharmacoDI/PharmacoDI/build_all_pset_tables.py", "/PharmacoDI/PharmacoDI/get_chembl_targets.py", "/PharmacoDI/PharmacoDI/get_chembl_drug_targets.py", "/PharmacoDI/PharmacoDI/build_target_tables.py", "/PharmacoDI/PharmacoDI/combine_pset_tables.py", "/PharmacoDI/PharmacoDI/build_synonym_tables.py", "/PharmacoDI/PharmacoDI/build_cellosaurus.py", "/PharmacoDI/PharmacoDI/build_clinical_trial_tables.py"]} |
45,828 | bhklab/DataIngestion | refs/heads/master | /PharmacoDI/PharmacoDI/build_primary_pset_tables.py | import os
import glob
import pandas as pd
import numpy as np
def build_primary_pset_tables(pset_dict, pset_name):
    """
    Build the tissue, drug, and gene tables for a PSet and return them
    in a dictionary, with table names as the keys.

    @param pset_dict: [`dict`] A nested dictionary containing all tables in the PSet
    @param pset_name: [`string`] The name of the PSet
    @return: [`dict`] A dictionary of primary tables, with table names as keys
    """
    pset_dfs = {
        'dataset': pd.Series(pset_name, name='name'),
        'tissue': build_tissue_df(pset_dict),
        'drug': build_drug_df(pset_dict),
        'drug_annotation': build_drug_annotation_df(pset_dict),
        'cell': build_cell_df(pset_dict),
    }
    # Gene tables only exist when the PSet has molecular profiles
    # (TODO - check with chris)
    if 'molecularProfiles' in pset_dict:
        pset_dfs['gene'] = build_gene_df(pset_dict)
        pset_dfs['gene_annotation'] = build_gene_annotation_df(pset_dict)
    return pset_dfs
def build_gene_df(pset_dict):
    """
    Build a table containing all genes in a dataset.

    @param pset_dict: [`dict`] A nested dictionary containing all tables in the PSet
    @return: [`pd.Series`] The gene table (unique gene names, named 'name')
    """
    # Collect one Series of feature names per molecular data type; build the
    # pieces first and concatenate once (Series.append was removed in
    # pandas 2.0 and re-appending in a loop is quadratic).
    gene_series = [pd.Series([], name='name', dtype='str')]
    for mDataType in pset_dict['molecularProfiles']:
        gene_series.append(pd.Series(pd.unique(
            pset_dict['molecularProfiles'][mDataType]['rowData']['.features']),
            name='name', dtype='str'))
    gene_df = pd.concat(gene_series, ignore_index=True)
    # Many ENSEMBL gene IDs have a version suffix (ex. ENST00000456328.2
    # instead of ENST00000456328); versions may have more than one digit
    # (ex. .12), so strip one-or-more digits after the trailing dot.
    gene_df.replace(r'\.[0-9]+$', '', regex=True, inplace=True)
    gene_df.drop_duplicates(inplace=True)
    return gene_df
def build_tissue_df(pset_dict):
    """
    Build a table of all tissues present in a dataset.

    @param pset_dict: [`dict`] A nested dictionary containing all tables in the PSet
    @return: [`pd.Series`] The tissue table (unique tissue ids, named 'name')
    """
    unique_tissues = pd.unique(pset_dict['cell']['tissueid'])
    return pd.Series(unique_tissues, name='name')
def build_drug_df(pset_dict):
    """
    Build a table of all drugs present in a dataset.

    @param pset_dict: [`dict`] A nested dictionary containing all tables in the PSet
    @return: [`pd.Series`] The drug table (unique drug ids, named 'name')
    """
    unique_drugs = pd.unique(pset_dict['drug']['drugid'])
    return pd.Series(unique_drugs, name='name')
def build_gene_annotation_df(pset_dict):
    """
    Build a table mapping each gene in a dataset to its gene annotations.

    @param pset_dict: [`dict`] A nested dictionary containing all tables in the PSet
    @return: [`pd.DataFrame`] A table of all gene annotations, mapped to genes
    """
    annotation_dfs = []
    for mDataType in pset_dict['molecularProfiles']:
        df = pset_dict['molecularProfiles'][mDataType]['rowData'].copy()
        # Keep only the gene annotation columns; 'Symbol' is optional
        cols = ['.features']
        if 'Symbol' in df.columns:
            cols.append('Symbol')
        df = df[cols]
        df.rename(columns={'.features': 'gene_id',
                           'Symbol': 'symbol'}, inplace=True)
        annotation_dfs.append(df)
    # DataFrame.append was removed in pandas 2.0; the leading empty frame
    # preserves the full column set even when a data type lacks symbols.
    gene_annotation_df = pd.concat(
        [pd.DataFrame(columns=['gene_id', 'symbol', 'gene_seq_start',
                               'gene_seq_end'], dtype='str')] + annotation_dfs)
    # Remove all ENSEMBL gene ID version suffixes (ex. ENST00000456328.2 ->
    # ENST00000456328); versions may have multiple digits (ex. .12), so
    # match one-or-more digits. Reassignment avoids chained-inplace replace.
    gene_annotation_df['gene_id'] = gene_annotation_df['gene_id'].replace(
        r'\.[0-9]+$', '', regex=True)
    gene_annotation_df.drop_duplicates(subset=['gene_id'], inplace=True)
    return gene_annotation_df
def build_drug_annotation_df(pset_dict):
    """
    Build a table mapping each drug in a dataset to its drug annotations.

    @param pset_dict: [`dict`] A nested dictionary containing all tables in the PSet
    @return: [`pd.DataFrame`] A table of all drug annotations, mapped to drugs
    """
    # Keep only the annotation columns, then normalize their names
    annotation_cols = ['rownames', 'smiles', 'inchikey', 'cid', 'FDA']
    drug_annotation_df = pset_dict['drug'][annotation_cols].copy()
    return drug_annotation_df.rename(
        columns={'rownames': 'drug_id', 'cid': 'pubchem', 'FDA': 'fda_status'})
# TODO - confirm that you're using the correct cell id
def build_cell_df(pset_dict):
    """
    Build a table of all cell lines in a dataset, mapped to their tissues.

    @param pset_dict: [`dict`] A nested dictionary containing all tables in the PSet
    @return: [`pd.DataFrame`] A table of all cell lines, mapped to tissues
    """
    cell_df = pset_dict['cell'].loc[:, ['cellid', 'tissueid']].copy()
    return cell_df.rename(columns={'cellid': 'name', 'tissueid': 'tissue_id'})
| {"/PharmacoDI/PharmacoDI/__init__.py": ["/PharmacoDI/PharmacoDI/download_psets.py", "/PharmacoDI/PharmacoDI/download_canonical_psets.py", "/PharmacoDI/PharmacoDI/download_gene_signatures.py", "/PharmacoDI/PharmacoDI/get_gene_targets.py", "/PharmacoDI/PharmacoDI/get_target_annotations.py", "/PharmacoDI/PharmacoDI/read_pset.py", "/PharmacoDI/PharmacoDI/build_all_pset_tables.py", "/PharmacoDI/PharmacoDI/get_chembl_targets.py", "/PharmacoDI/PharmacoDI/get_chembl_drug_targets.py", "/PharmacoDI/PharmacoDI/build_target_tables.py", "/PharmacoDI/PharmacoDI/combine_pset_tables.py", "/PharmacoDI/PharmacoDI/build_synonym_tables.py", "/PharmacoDI/PharmacoDI/build_cellosaurus.py", "/PharmacoDI/PharmacoDI/build_clinical_trial_tables.py"]} |
45,829 | bhklab/DataIngestion | refs/heads/master | /PharmacoDI/PharmacoDI/build_all_pset_tables.py | import os
import glob
import re
import pandas as pd
import numpy as np
from PharmacoDI.build_primary_pset_tables import build_primary_pset_tables, build_cell_df, build_drug_df, build_tissue_df
from PharmacoDI.build_experiment_tables import build_experiment_tables, build_experiment_df
from PharmacoDI.build_pset_gene_drugs import build_gene_drug_df
from PharmacoDI.write_pset_table import write_pset_table
from PharmacoDI.build_dataset_join_tables import build_dataset_join_dfs
def build_all_pset_tables(pset_dict, pset_name, procdata_dir, gene_sig_dir):
    """
    Build all tables for a dataset and write them to a directory of all processed data.

    @param pset_dict: [`dict`] A nested dictionary containing all tables in the PSet
    @param pset_name: [`string`] The name of the PSet
    @param procdata_dir: [`string`] The file path to the directory containing processed data
    @param gene_sig_dir: [`string`] The file path to the directory containing gene_drugs data
    @return: [`None`]
    """
    # NOTE(review): this initial value is immediately overwritten below
    pset_dfs = {}
    # Build primary tables (relating to cells, drugs, tissues, genes)
    print('Building primary tables...')
    pset_dfs = build_primary_pset_tables(pset_dict, pset_name)
    print('Building dataset join tables...')
    pset_dfs = {**pset_dfs, **build_dataset_join_dfs(
        pset_dict, pset_name, pset_dfs)}
    # Build experiment tables
    print('Building experiment tables...')
    # FIX: Modified to use pre-3.9 syntax to ensure backwards compatibility
    pset_dfs = {**pset_dfs, **build_experiment_tables(
        pset_dict, pset_name, pset_dfs['cell'])}
    # Build gene drugs table
    print('Building gene drug table...')
    pset_dfs['gene_drug'] = build_gene_drug_df(gene_sig_dir, pset_name)
    # build_gene_drug_df may return a non-DataFrame sentinel when no gene
    # signature data exists for this PSet; drop the entry in that case.
    if not isinstance(pset_dfs['gene_drug'], pd.DataFrame):
        del pset_dfs['gene_drug']
    # Build summary/stats tables
    print('Building mol_cell and dataset_stats tables...')
    if 'gene_drug' in pset_dfs:
        # mol_cell requires the gene_drug and dataset_cell tables
        pset_dfs['mol_cell'] = build_mol_cell_df(
            pset_dict, pset_name, pset_dfs['gene_drug'], pset_dfs['dataset_cell'])
    pset_dfs['dataset_statistics'] = build_dataset_stats_df(
        pset_dict, pset_name, pset_dfs)
    # Write all tables to CSV files
    for df_name in pset_dfs.keys():
        write_pset_table(pset_dfs[df_name], df_name, pset_name, procdata_dir)
def build_mol_cell_df(pset_dict, pset_name, gene_drug_df, dataset_cell_df=None):
    """
    Builds a table that summarizes the number of profiles, per cell line, per molecular data
    type, in this dataset. (Only considers molecular data types for which there are sens stats?)

    @param pset_dict: [`dict`] A nested dictionary containing all tables in the PSet
    @param pset_name: [`string`] The name of the PSet
    @param gene_drug_df: [`pd.DataFrame`] The gene_drug table for this PSet
    @param dataset_cell_df: [`pd.DataFrame`] A table containing all the cells in this
        PSet and the PSet name; built on demand when None
    @return: [`pd.DataFrame`] The table with the number of profiles for each cell line,
        for each molecular data type
    """
    mol_cell_df = pd.DataFrame(
        columns=['cell_id', 'dataset_id', 'mDataType', 'num_prof'])
    # Only molecular data types that appear in the gene_drug table are counted
    molecularTypes = pd.unique(gene_drug_df['mDataType'])
    if 'molecularProfiles' in pset_dict:
        profiles_dict = pset_dict['molecularProfiles']
    else:
        profiles_dict = None
    if dataset_cell_df is None:
        # NOTE(review): build_dataset_cell_df is not defined/imported in this
        # module's visible code — confirm where this fallback comes from.
        dataset_cell_df = build_dataset_cell_df(
            pset_dict, pset_name, cell_df=None)
    for mDataType in molecularTypes:
        if isinstance(profiles_dict, dict):
            # Get the number of times each cellid appears in colData for that mDataType
            num_profiles = profiles_dict[mDataType]['colData']['cellid'].value_counts(
            )
            # Join with datasets cells on cellid; left join keeps cells with
            # no profiles (their count becomes NaN, zeroed below)
            df = pd.merge(dataset_cell_df, num_profiles,
                          left_on='cell_id', right_on=num_profiles.index, how='left')
            # Rename the count column produced by value_counts
            df.rename(columns={'cellid': 'num_prof'}, inplace=True)
            # Set mDataType column to the current molecular type
            df['mDataType'] = mDataType
        else:
            # If PSet contains no molecular profiles, set num_prof to 0
            # for all cell lines and all molecular data types
            df = dataset_cell_df.copy()
            df['mDataType'] = mDataType
            df['num_prof'] = 0
        # Append to mol_cell_df
        mol_cell_df = mol_cell_df.append(df)
    # Replace any NaN in the num_prof column with 0
    mask = mol_cell_df.query('num_prof.isna()').index
    mol_cell_df.loc[mask, 'num_prof'] = 0
    mol_cell_df['num_prof'] = mol_cell_df['num_prof'].astype('int32')
    return mol_cell_df
def build_dataset_stats_df(pset_dict, pset_name, pset_dfs=None):
    """
    Summarize how many cell lines, tissues, drugs, and experiments are
    contained within the dataset.

    @param pset_dict: [`dict`] A nested dictionary containing all tables in the PSet
    @param pset_name: [`string`] The name of the PSet
    @param pset_dfs: [`dict`] A dictionary of tables from the PSet, with table names
        as the keys
    @return: [`pd.DataFrame`] A one-row table with the summary stats for this PSet
    """
    if pset_dfs is None:
        pset_dfs = {}
    # Build any missing tables on demand
    if 'tissue' not in pset_dfs:
        pset_dfs['tissue'] = build_tissue_df(pset_dict)
    if 'cell' not in pset_dfs:
        pset_dfs['cell'] = build_cell_df(pset_dict)
    if 'drug' not in pset_dfs:
        pset_dfs['drug'] = build_drug_df(pset_dict)
    if 'experiment' not in pset_dfs:
        pset_dfs['experiment'] = build_experiment_df(
            pset_dict, pset_name, pset_dfs['cell'])
    counts = {key: len(pset_dfs[key].index)
              for key in ('cell', 'tissue', 'drug', 'experiment')}
    return pd.DataFrame({
        'dataset_id': [pset_name],
        'cell_lines': [counts['cell']],
        'tissues': [counts['tissue']],
        'drugs': [counts['drug']],
        'experiments': [counts['experiment']]
    })
| {"/PharmacoDI/PharmacoDI/__init__.py": ["/PharmacoDI/PharmacoDI/download_psets.py", "/PharmacoDI/PharmacoDI/download_canonical_psets.py", "/PharmacoDI/PharmacoDI/download_gene_signatures.py", "/PharmacoDI/PharmacoDI/get_gene_targets.py", "/PharmacoDI/PharmacoDI/get_target_annotations.py", "/PharmacoDI/PharmacoDI/read_pset.py", "/PharmacoDI/PharmacoDI/build_all_pset_tables.py", "/PharmacoDI/PharmacoDI/get_chembl_targets.py", "/PharmacoDI/PharmacoDI/get_chembl_drug_targets.py", "/PharmacoDI/PharmacoDI/build_target_tables.py", "/PharmacoDI/PharmacoDI/combine_pset_tables.py", "/PharmacoDI/PharmacoDI/build_synonym_tables.py", "/PharmacoDI/PharmacoDI/build_cellosaurus.py", "/PharmacoDI/PharmacoDI/build_clinical_trial_tables.py"]} |
45,830 | bhklab/DataIngestion | refs/heads/master | /PharmacoDI/PharmacoDI/download_gene_signatures.py | import sys
import os
def download_gene_signatures(user=None, password=None, remote="niagara.computecanada.ca", opts={},
                             remote_path="/scratch/b/bhaibeka/psmirnov/pearson_perm_res/", save_dir="data/rawdata/gene_signatures"):
    """
    Download all precomputed gene signatures from the `remote_path` directory on the `remote` server, excluding those
    already in `save_dir`.

    WARNING: The current implementation relies on rsync being installed. Thus it will not work on Windows platforms.

    :param user: [string] Your username for the remote server. In interactive sessions, if you exclude this argument
        you will be prompted to enter your username.
    :param password: [string] Your password for the remote server. To avoid hard coding your password, we recommend
        you read it from an environment variable via `os.environ`. In interactive sessions, if you exclude this
        argument you will be prompted to enter your password.
    :param remote: [string] Name or IP of remote server
    :param opts: [dict] Unused; retained for backwards compatibility of the signature.
    :param remote_path: [string] Path to the gene signature directory on the remote server
    :param save_dir: [string] Path to save the downloaded gene signatures to.
    :return: [None] Syncs `save_dir` with `remote:remote_path`, downloading the most up to date gene signatures for
        PharmacoDB.
    """
    # Identity comparison with None per PEP 8 (was `== None`)
    if user is None or password is None:
        if sys.flags.interactive:
            if user is None:
                user = input("Please enter your username for {}: ".format(remote))
            if password is None:
                password = input("Please enter your password for {}: ".format(remote))
        else:
            raise ValueError("You must pass `user` and `password` parameters to this function in non-interactive Python sessions")
    # NOTE(review): `password` is collected but never passed to rsync below —
    # ssh will prompt for credentials itself. Confirm whether key-based auth
    # (or sshpass) was intended before relying on the `password` argument.
    os.system(f'rsync -hrtvP {user}@{remote}:{remote_path} {save_dir}')
| {"/PharmacoDI/PharmacoDI/__init__.py": ["/PharmacoDI/PharmacoDI/download_psets.py", "/PharmacoDI/PharmacoDI/download_canonical_psets.py", "/PharmacoDI/PharmacoDI/download_gene_signatures.py", "/PharmacoDI/PharmacoDI/get_gene_targets.py", "/PharmacoDI/PharmacoDI/get_target_annotations.py", "/PharmacoDI/PharmacoDI/read_pset.py", "/PharmacoDI/PharmacoDI/build_all_pset_tables.py", "/PharmacoDI/PharmacoDI/get_chembl_targets.py", "/PharmacoDI/PharmacoDI/get_chembl_drug_targets.py", "/PharmacoDI/PharmacoDI/build_target_tables.py", "/PharmacoDI/PharmacoDI/combine_pset_tables.py", "/PharmacoDI/PharmacoDI/build_synonym_tables.py", "/PharmacoDI/PharmacoDI/build_cellosaurus.py", "/PharmacoDI/PharmacoDI/build_clinical_trial_tables.py"]} |
45,831 | bhklab/DataIngestion | refs/heads/master | /PharmacoDI/PharmacoDI/download_canonical_psets.py | import pandas as pd
import wget
def download_canonical_psets(save_dir, api_url= "https://www.orcestra.ca/api/psets/canonical"):
    """
    Download every canonical PSet listed by the Orcestra API.

    :param save_dir: [string] Path to save the PSets in
    :param api_url: [string] URL where available PSets can be retrieved. Defaults to current Orcestra API.
    :return: [None] Downloads PSet .rds files into save_dir using wget.
    """
    pset_df = pd.read_json(api_url)
    # Map each PSet name to its download link
    download_links = pset_df.set_index('name').to_dict()['downloadLink']
    for name, url in download_links.items():
        print("Downloading", name, "from", url, sep=" ")
        wget.download(url, save_dir + "/" + name + '.rds')
    return None
| {"/PharmacoDI/PharmacoDI/__init__.py": ["/PharmacoDI/PharmacoDI/download_psets.py", "/PharmacoDI/PharmacoDI/download_canonical_psets.py", "/PharmacoDI/PharmacoDI/download_gene_signatures.py", "/PharmacoDI/PharmacoDI/get_gene_targets.py", "/PharmacoDI/PharmacoDI/get_target_annotations.py", "/PharmacoDI/PharmacoDI/read_pset.py", "/PharmacoDI/PharmacoDI/build_all_pset_tables.py", "/PharmacoDI/PharmacoDI/get_chembl_targets.py", "/PharmacoDI/PharmacoDI/get_chembl_drug_targets.py", "/PharmacoDI/PharmacoDI/build_target_tables.py", "/PharmacoDI/PharmacoDI/combine_pset_tables.py", "/PharmacoDI/PharmacoDI/build_synonym_tables.py", "/PharmacoDI/PharmacoDI/build_cellosaurus.py", "/PharmacoDI/PharmacoDI/build_clinical_trial_tables.py"]} |
45,832 | bhklab/DataIngestion | refs/heads/master | /PharmacoDI/PharmacoDI/build_target_tables.py | import os
import requests
import pandas as pd
import numpy as np
from multiprocessing import Pool, cpu_count
import datatable as dt
from PharmacoDI.combine_pset_tables import write_table, rename_and_key, join_tables
from PharmacoDI.get_chembl_drug_targets import parallelize
# Default input/output locations for build_target_tables; the function takes
# these as explicit arguments so callers can override them.
drugbank_file = os.path.join(
    "data", "metadata", "drugbank_targets_has_ref_has_uniprot.csv")
chembl_file = os.path.join('data', 'metadata', 'chembl_drug_targets.csv')
output_dir = os.path.join("data", "demo")
def build_target_tables(drugbank_file, chembl_file, output_dir):
    """
    Build the target and drug target tables using data from Drugbank
    and ChEMBL.

    @param drugbank_file: [`string`] The full file path to Drugbank targets
    @param chembl_file: [`string`] The full file path to ChEMBL targets
    @param output_dir: [`string`] The directory of all final PharmacoDB tables
    @return: None
    """
    # Load and normalize the Drugbank target annotations
    if not os.path.exists(drugbank_file):
        raise FileNotFoundError(f"The file {drugbank_file} doesn't exist!")
    drugbank_df = pd.read_csv(drugbank_file).rename(columns={
        'polypeptide.external.identifiers.UniProtKB': 'uniprot_id',
        'drugName': 'drug_name'})
    # Load and normalize the ChEMBL target annotations
    if not os.path.exists(chembl_file):
        raise FileNotFoundError(f"The file {chembl_file} doesn't exist!")
    chembl_df = pd.read_csv(chembl_file, index_col=0).rename(columns={
        'pref_name': 'name',
        'accession': 'uniprot_id'})
    # Build target first: the keyed target table drives the two join tables
    target_df = build_target_table(chembl_df, drugbank_df, output_dir)
    build_drug_target_table(chembl_df, drugbank_df, target_df, output_dir)
    build_gene_drug_table(chembl_df, drugbank_df, target_df, output_dir)
def build_target_table(chembl_df, drugbank_df, output_dir):
    """
    Using data from the Drugbank and ChEMBL drug target files, build the
    target table, write it to output_dir, and return it keyed for joins.

    @param chembl_df: [`pd.DataFrame`] The ChEMBL drug target table
    @param drugbank_df: [`pd.DataFrame`] The DrugBank drug target table
    @param output_dir: [`string`] The file path to write the final target table
    @return: [`datatable.Frame`] The target table
    """
    # Union of target names from both sources, deduplicated
    all_targets = pd.concat([chembl_df[['name']].copy(),
                             drugbank_df[['name']].copy()])
    all_targets = all_targets.drop_duplicates()
    target_df = write_table(dt.Frame(all_targets), 'target', output_dir)
    return rename_and_key(target_df, 'target_id')
def build_drug_target_table(chembl_df, drugbank_df, target_df, output_dir):
    """
    Using data from the Drugbank and ChEMBL drug target files and
    the target table, build the drug target table.

    @param chembl_df: [`pd.DataFrame`] The ChEMBL drug target table
    @param drugbank_df: [`pd.DataFrame`] The DrugBank drug target table
    @param target_df: [`datatable.Frame`] The target table, keyed
    @param output_dir: [`string`] The file path with all final PharmacoDB tables
    @return: [`datatable.Frame`] The drug target table
    """
    # Load drug synonym table from output_dir
    drug_synonym_file = os.path.join(output_dir, 'drug_synonym.csv')
    if not os.path.exists(drug_synonym_file):
        raise FileNotFoundError(
            f"There is no drug synonym file in {output_dir}!")
    drug_syn_df = pd.read_csv(drug_synonym_file, dtype={'drug_id': 'int32'})
    # Join drugbank df with drug table (TODO: are we really using drug name to map?)
    drugbank_df = pd.merge(drugbank_df, drug_syn_df, on='drug_name')
    # TODO: from 7521 down to only 122 rows :/
    # (the inner merge above drops all drugbank rows with unmatched names)
    # Combine ChEMBL and Drugbank tables to make drug target table
    drug_target_df = pd.concat([chembl_df[['name', 'drug_id']].copy(),
                                drugbank_df[['name', 'drug_id']].copy()])
    drug_target_df.rename(columns={'name': 'target_id'}, inplace=True)
    drug_target_df.drop_duplicates(inplace=True)
    # Join with target table to map target names to numeric target ids
    drug_target_df = dt.Frame(drug_target_df)
    drug_target_df = join_tables(drug_target_df, target_df, 'target_id')
    # Drop rows with no target_id (non-matches appear to get target_id < 1 —
    # TODO confirm join_tables sentinel), then keep one row per unique tuple
    drug_target_df = drug_target_df[dt.f.target_id >= 1, :]
    drug_target_df = drug_target_df[0, :, dt.by(drug_target_df.names)]
    drug_target_df = write_table(
        drug_target_df, 'drug_target', output_dir, add_index=False)
    return drug_target_df
def build_gene_drug_table(chembl_df, drugbank_df, target_df, output_dir):
    """
    Build the gene_target join table, bridging target annotations to
    ENSEMBL gene IDs via UniProt IDs.

    @param chembl_df: [`pd.DataFrame`] The ChEMBL drug target table
    @param drugbank_df: [`pd.DataFrame`] The DrugBank drug target table
    @param target_df: [`datatable.Frame`] The target table, keyed
    @param output_dir: [`string`] The file path with all final PharmacoDB tables
    @return: [`datatable.Frame`] The gene_target table
    """
    # Get target-uniprot mappings from ChEMBL and Drugbank tables
    gene_target_df = pd.concat([chembl_df[['name', 'uniprot_id']].copy(),
                                drugbank_df[['name', 'uniprot_id']].copy()])
    gene_target_df.rename(columns={'name': 'target_id'}, inplace=True)
    gene_target_df.drop_duplicates(inplace=True)
    # Retrieve Uniprot-ENSEMBL gene ID mappings in parallel batches of 1000
    uniprot_ids = pd.Series(pd.unique(gene_target_df['uniprot_id']))
    uniprot_ensembl_mappings = pd.concat(
        parallelize(uniprot_ids, map_uniprot_to_ensembl, 1000))
    uniprot_ensembl_mappings.drop_duplicates(inplace=True)
    # Join gene_target table with gene table based on uniprot-ensembl mappings
    gene_target_df = pd.merge(
        gene_target_df, uniprot_ensembl_mappings, on='uniprot_id')
    # uniprot_id was only needed as the join bridge; drop it
    gene_target_df.drop(columns=['uniprot_id'], inplace=True)
    # Load and key the gene table from output_dir
    gene_file = os.path.join(output_dir, 'gene.csv')
    if not os.path.exists(gene_file):
        raise FileNotFoundError(f"There is no gene file in {output_dir}!")
    gene_df = dt.fread(gene_file, sep=",")
    gene_df = rename_and_key(gene_df, 'gene_id')
    # Join target table with gene table and target table
    gene_target_df = dt.Frame(gene_target_df)
    gene_target_df = join_tables(gene_target_df, gene_df, 'gene_id')
    gene_target_df = join_tables(gene_target_df, target_df, 'target_id')
    # Drop rows where either join failed (ids < 1 — TODO confirm join_tables
    # sentinel), then keep one row per unique (gene_id, target_id) pair
    gene_target_df = gene_target_df[(
        dt.f.target_id >= 1) & (dt.f.gene_id >= 1), :]
    gene_target_df = gene_target_df[0, :, dt.by(gene_target_df.names)]
    gene_target_df = write_table(
        gene_target_df, 'gene_target', output_dir, add_index=False)
    return gene_target_df
def map_uniprot_to_ensembl(uniprot_ids):
    """
    Use the UniProt API to retrieve the ENSEMBL gene IDs
    corresponding to the UniProt IDs.

    @param uniprot_ids: [`list(string)`] A list of UniProt IDs.
    @return: [`pd.DataFrame`] A table mapping UniProt IDs to ENSEMBL gene IDs.
    """
    # Request a tab-separated ID -> ENSEMBL_ID mapping for all IDs at once
    query_params = {
        'from': 'ID',
        'to': 'ENSEMBL_ID',
        'format': 'tab',
        'query': " ".join(uniprot_ids)
    }
    response = requests.get('https://www.uniprot.org/uploadlists/', params=query_params)
    # Abort on any HTTP error status
    response.raise_for_status()
    # The body is one mapping per line with a header line; drop the header
    rows = response.text.split("\n")[1:]
    gene_id_df = pd.DataFrame(rows)[0].str.split(pat="\t", expand=True)
    # Drop rows that did not split into two fields (e.g. the trailing blank line)
    gene_id_df = gene_id_df.dropna()
    return gene_id_df.rename(columns={0: 'uniprot_id', 1: 'gene_id'})
| {"/PharmacoDI/PharmacoDI/__init__.py": ["/PharmacoDI/PharmacoDI/download_psets.py", "/PharmacoDI/PharmacoDI/download_canonical_psets.py", "/PharmacoDI/PharmacoDI/download_gene_signatures.py", "/PharmacoDI/PharmacoDI/get_gene_targets.py", "/PharmacoDI/PharmacoDI/get_target_annotations.py", "/PharmacoDI/PharmacoDI/read_pset.py", "/PharmacoDI/PharmacoDI/build_all_pset_tables.py", "/PharmacoDI/PharmacoDI/get_chembl_targets.py", "/PharmacoDI/PharmacoDI/get_chembl_drug_targets.py", "/PharmacoDI/PharmacoDI/build_target_tables.py", "/PharmacoDI/PharmacoDI/combine_pset_tables.py", "/PharmacoDI/PharmacoDI/build_synonym_tables.py", "/PharmacoDI/PharmacoDI/build_cellosaurus.py", "/PharmacoDI/PharmacoDI/build_clinical_trial_tables.py"]} |
45,833 | bhklab/DataIngestion | refs/heads/master | /PharmacoDI/PharmacoDI/get_chembl_drug_targets.py | from chembl_webresource_client.new_client import new_client
import multiprocessing as mp
import numpy as np
import pandas as pd
import os
def get_chembl_drug_target_mappings(drug_annotation_file, target_file, drug_target_file):
    """
    Get drug target mappings for all drugs in the drug_annotation file (using standard
    inchi key to map between the file and ChEMBL) and all targets in the target file.
    Write to drug_target file and return the resulting DataFrame.

    :param drug_annotation_file: the full file path to the drug_annotation table
    :param target_file: the full file path to the target csv file
    :param drug_target_file: the full file path where the drug target file will be written
    :return: the ChEMBL drug target DataFrame
    :raises FileNotFoundError: if drug_annotation_file or target_file is missing
    """
    # Load drug annotations table
    if not os.path.exists(drug_annotation_file):
        raise FileNotFoundError(f"The file {drug_annotation_file} does not exist!")
    drug_df = pd.read_csv(drug_annotation_file)
    # Get inchikeys of all drugs (not smiles bc capitalization in ChEMBL is inconsistent..)
    inchikeys = list(pd.unique(drug_df['inchikey'].dropna()))
    # Get ChEMBL drugs with matching inchikeys
    print('Getting all drugs from ChEMBL...')
    chembl_drug_df = pd.concat(parallelize(
        inchikeys, get_drugs_by_inchikey, 50))
    chembl_drug_df = pd.merge(
        drug_df[['drug_id', 'inchikey']], chembl_drug_df, on='inchikey', how='inner')
    chembl_drug_df.drop(columns='inchikey', inplace=True)
    molecule_ids = list(chembl_drug_df['molecule_chembl_id'])
    # Get targets from target_file
    if not os.path.exists(target_file):
        # FIX: previously this only printed a warning and fell through to
        # read_csv, which then failed with a less informative error.
        raise FileNotFoundError(
            f"The ChEMBL target file {target_file} doesn't exist! "
            "Call get_chembl_targets to generate this file.")
    target_df = pd.read_csv(target_file, index_col=0)
    target_df = target_df[['target_chembl_id',
                           'pref_name', 'target_type', 'accession']].copy()
    target_df.drop_duplicates(inplace=True)
    target_ids = list(pd.unique(target_df['target_chembl_id']))
    # Get mappings between drugs (molecule_ids) and targets (target_ids)
    # TODO: I'm not sure if parallelization actually helps for this API call
    print('Getting drug-target mappings from ChEMBL...')
    drug_target_mappings = parallelize(
        molecule_ids, get_drug_target_mappings, 50, target_ids)
    drug_target_df = pd.concat(drug_target_mappings).drop_duplicates()
    drug_target_df = pd.merge(
        drug_target_df, chembl_drug_df, on='molecule_chembl_id')
    drug_target_df = pd.merge(drug_target_df, target_df, on='target_chembl_id')
    # Reorder columns and write to .csv
    drug_target_df = drug_target_df[['drug_id', 'molecule_chembl_id',
                                     'target_chembl_id', 'pref_name', 'accession', 'target_type']].copy()
    drug_target_df.to_csv(drug_target_file)
    return drug_target_df
# TODO: add progress bar to parallelize
def parallelize(queries, operation, chunksize, *args):
    """
    Split queries into chunks of chunksize and use a process pool to
    apply operation to each chunk in parallel.

    :param queries: [`list`] The full list of inputs to process
    :param operation: [`callable`] The function applied to each chunk; must be
        picklable (defined at module level) for multiprocessing to work
    :param chunksize: [`int`] How many queries each process should handle
    :param args: Extra positional arguments forwarded to operation after the chunk
    :return: [`list`] One result per chunk, in chunk order
    """
    chunked_queries = [queries[i:i + chunksize]
                       for i in range(0, len(queries), chunksize)]
    # FIX: use the pool as a context manager so worker processes are always
    # cleaned up, even when operation raises inside a worker.
    with mp.Pool(mp.cpu_count()) as pool:
        if args:
            # starmap forwards the extra args alongside each chunk
            results = pool.starmap(
                operation, [(chunk, *args) for chunk in chunked_queries])
        else:
            results = pool.map(operation, chunked_queries)
    return results
def get_drugs_by_inchikey(inchikeys):
    """
    Get all drugs in the ChEMBL database with matching inchikeys.

    :param inchikeys: [`list(string)`] A list of inchikeys
    :return: [`pd.DataFrame`] A dataframe of drugs (inchikey and ChEMBL molecule ID)
    """
    # Initiate connection to ChEMBL molecule table
    molecule = new_client.molecule
    molecules = molecule.filter(molecule_structures__standard_inchi_key__in=inchikeys).only(
        ['molecule_chembl_id', 'molecule_structures'])
    # FIX: collect rows in a list and build the frame once — DataFrame.append
    # was removed in pandas 2.0 and re-appending in a loop is quadratic.
    rows = [{'inchikey': mol['molecule_structures']['standard_inchi_key'],
             'molecule_chembl_id': mol['molecule_chembl_id']}
            for mol in molecules]
    # Explicit columns keep the schema stable even when no drugs match
    return pd.DataFrame(rows, columns=['inchikey', 'molecule_chembl_id'])
def get_drug_target_mappings(molecule_ids, target_ids):
    """
    Retrieve mappings between drugs specified by ChEMBL molecule_ids and
    targets specified by ChEMBL target_ids.

    :param molecule_ids: [`list(string)`] A list of ChEMBL drug IDs
    :param target_ids: [`list(string)`] A list of ChEMBL target IDs
    :return: [`pd.DataFrame`] Drug target mappings (molecule ChEMBL ID and target ChEMBL ID)
    """
    # Initiate connection to ChEMBL activity table; keep only activities
    # that have a recorded pChEMBL value
    activity = new_client.activity
    query = activity.filter(
        molecule_chembl_id__in=molecule_ids,
        target_chembl_id__in=target_ids,
        pchembl_value__isnull=False
    ).only(['molecule_chembl_id', 'target_chembl_id'])
    return pd.DataFrame(list(query))
| {"/PharmacoDI/PharmacoDI/__init__.py": ["/PharmacoDI/PharmacoDI/download_psets.py", "/PharmacoDI/PharmacoDI/download_canonical_psets.py", "/PharmacoDI/PharmacoDI/download_gene_signatures.py", "/PharmacoDI/PharmacoDI/get_gene_targets.py", "/PharmacoDI/PharmacoDI/get_target_annotations.py", "/PharmacoDI/PharmacoDI/read_pset.py", "/PharmacoDI/PharmacoDI/build_all_pset_tables.py", "/PharmacoDI/PharmacoDI/get_chembl_targets.py", "/PharmacoDI/PharmacoDI/get_chembl_drug_targets.py", "/PharmacoDI/PharmacoDI/build_target_tables.py", "/PharmacoDI/PharmacoDI/combine_pset_tables.py", "/PharmacoDI/PharmacoDI/build_synonym_tables.py", "/PharmacoDI/PharmacoDI/build_cellosaurus.py", "/PharmacoDI/PharmacoDI/build_clinical_trial_tables.py"]} |
45,834 | bhklab/DataIngestion | refs/heads/master | /PharmacoDI/PharmacoDI/download_psets.py | import pandas as pd
import wget
def download_psets(names, save_dir, api_url="https://www.orcestra.ca/api/psets/available"):
    """
    Download the specified PSets from the specified api_url.

    :param names: [list] Names of PSets to download. Must match the 'names' in the api call JSON.
    :param save_dir: [string] Path to save the PSets in
    :param api_url: [string] URL where available PSets can be retrieved. Defaults to current Orcestra API.
    :return: [None] Downloads PSet .rds files into save_dir using wget.
    :raises ValueError: If any requested name is not available from api_url.
    """
    pset_df = pd.read_json(api_url)
    names = pd.Series(names)
    missing = names[~names.isin(pset_df.name)]
    if not missing.empty:
        # FIX: previously `Series + str` built an element-wise concatenated
        # Series (and the message lacked a space); build a readable message.
        raise ValueError(
            f"{', '.join(missing)} are not valid pset names retrieved from api_url")
    pset_df = pset_df[pset_df.name.isin(names)]
    url_dict = pset_df.set_index('name').to_dict()['downloadLink']
    for name, url in url_dict.items():
        print("Downloading", name, "from", url, sep=" ")
        wget.download(url, save_dir + "/" + name + '.rds')
    return None
| {"/PharmacoDI/PharmacoDI/__init__.py": ["/PharmacoDI/PharmacoDI/download_psets.py", "/PharmacoDI/PharmacoDI/download_canonical_psets.py", "/PharmacoDI/PharmacoDI/download_gene_signatures.py", "/PharmacoDI/PharmacoDI/get_gene_targets.py", "/PharmacoDI/PharmacoDI/get_target_annotations.py", "/PharmacoDI/PharmacoDI/read_pset.py", "/PharmacoDI/PharmacoDI/build_all_pset_tables.py", "/PharmacoDI/PharmacoDI/get_chembl_targets.py", "/PharmacoDI/PharmacoDI/get_chembl_drug_targets.py", "/PharmacoDI/PharmacoDI/build_target_tables.py", "/PharmacoDI/PharmacoDI/combine_pset_tables.py", "/PharmacoDI/PharmacoDI/build_synonym_tables.py", "/PharmacoDI/PharmacoDI/build_cellosaurus.py", "/PharmacoDI/PharmacoDI/build_clinical_trial_tables.py"]} |
45,835 | bhklab/DataIngestion | refs/heads/master | /PharmacoDI/PharmacoDI/combine_pset_tables.py | import glob
import os
import re
import numpy as np
import pandas as pd
from datatable import dt, fread, iread, join, by, rbind, cbind, f
def combine_all_pset_tables(data_dir, output_dir):
    """
    Combine all PSet tables into the final PharmacoDB tables.

    @param data_dir: [`string`] The file path to read the PSet tables
    @param output_dir: [`string`] The file path to write the final tables
    @return: [`dict(string: datatable.Frame)`] A dictionary of some of the
        final tables, with names as keys, to be used for later joins
    """
    print("Combining all PSet tables...")
    # Each stage threads the accumulating join-table dictionary through
    join_dfs = combine_primary_tables(data_dir, output_dir)
    join_dfs = combine_secondary_tables(data_dir, output_dir, join_dfs)
    return combine_experiment_tables(data_dir, output_dir, join_dfs)
def combine_primary_tables(data_dir, output_dir):
    """
    Build all the primary tables, i.e., tables that require no joins,
    and return them in a dictionary.

    @param data_dir: [`string`] The file path to read the PSet tables
    @param output_dir: [`string`] The file path to write the final tables
    @return: [`dict(string: datatable.Frame)`] A dictionary of all the primary
        tables, with names as keys
    """
    # For each primary table: load per-PSet pieces, concatenate, write to
    # disk, then rename/key it on its id column for use in later joins.
    join_ready = {}
    for table_name in ('tissue', 'drug', 'gene', 'dataset'):
        combined = load_join_write(table_name, data_dir, output_dir)
        join_ready[table_name] = rename_and_key(combined, table_name + '_id')
    return join_ready
def combine_secondary_tables(data_dir, output_dir, join_dfs):
    """
    Build all secondary tables, i.e., all tables that have foreign keys corresponding
    to primary keys of primary tables. The function reads PSet tables from
    data_dir, concatenates and joins them with tables from join_dfs, and
    writes them to output_dir.

    @param join_dfs: [`dict(string: datatable.Frame)`] A dictionary of all the primary
        tables, with names as keys
    @param data_dir: [`string`] The file path to read the PSet tables
    @param output_dir: [`string`] The file path to write the final tables
    @return: [`dict(string: datatable.Frame)`] The updated dictionary of join tables
    """
    # Build cell table and add to join_dfs dictionary (cell references tissue)
    cell_df = load_join_write(
        'cell', data_dir, output_dir, ['tissue'], join_dfs)
    join_dfs['cell'] = rename_and_key(cell_df, 'cell_id')
    # Build drug annotation table (no synthetic 'id' column is added)
    load_join_write('drug_annotation', data_dir,
                    output_dir, ['drug'], join_dfs, add_index=False)
    # Build gene annotation table
    gene_annot_df = load_table('gene_annotation', data_dir)
    # Remove any rows with no actual annotations (no symbol)
    gene_annot_df = gene_annot_df[dt.f.symbol > "", :]
    # Join the other way so that genes that got cut out are included back in
    # (left side is the full keyed gene table, so every gene id survives the join)
    gene_annot_df.key = 'gene_id'
    gene_annot_df = join_tables(join_dfs['gene'], gene_annot_df, 'gene_id')
    write_table(gene_annot_df, 'gene_annotation', output_dir, add_index=False)
    # Build join tables
    load_join_write('dataset_cell', data_dir, output_dir,
                    ['dataset', 'cell'], join_dfs, add_index=False)
    load_join_write('dataset_tissue', data_dir, output_dir,
                    ['dataset', 'tissue'], join_dfs, add_index=False)
    # TODO: temporary workaround for dataset_compound until we standardize drug -> compound
    dataset_compound_df = load_table('dataset_compound', data_dir)
    dataset_compound_df = join_tables(
        dataset_compound_df, join_dfs['dataset'], 'dataset_id')
    # Reuse the keyed drug table under the 'compound_id' name for this join
    compound_df = join_dfs['drug'].copy()
    compound_df.names = {'drug_id': 'compound_id'}
    dataset_compound_df = join_tables(
        dataset_compound_df, compound_df, 'compound_id')
    dataset_compound_df = write_table(
        dataset_compound_df, 'dataset_compound', output_dir, add_index=False)
    # Build all other secondary tables
    load_join_write('mol_cell', data_dir, output_dir,
                    ['cell', 'dataset'], join_dfs)
    # mol_cells has Kallisto. not sure why. from CTRPv2 (TODO)
    load_join_write('dataset_statistics', data_dir,
                    output_dir, ['dataset'], join_dfs)
    load_join_write('gene_drug', data_dir, output_dir, [
        'gene', 'drug', 'dataset', 'tissue'], join_dfs)
    return join_dfs
def combine_experiment_tables(data_dir, output_dir, join_dfs):
    """
    Load and process the experiment table, then use it to build the dose response
    and profile tables. The 'name' column is dropped from the experiment table
    before it is written to a CSV.

    @param join_dfs: [`dict(string: datatable.Frame)`] Keyed join tables built so far
    @param data_dir: [`string`] The file path to the PSet tables
    @param output_dir: [`string`] The file path to the final tables
    @return: [`dict(string: datatable.Frame)`] join_dfs, extended with the keyed
        experiment table (the original docstring said None, but join_dfs is returned)
    """
    # Load all experiments from PSets (indexed + joined with cell/drug/dataset/tissue)
    experiment_df = load_join_write('experiment', data_dir, output_dir, [
        'cell', 'drug', 'dataset', 'tissue'], join_dfs)
    # Don't write the 'name' column
    # (this overwrites the CSV load_join_write just produced, minus 'name')
    experiment_df[:, ['id', 'cell_id', 'drug_id', 'dataset_id', 'tissue_id']].to_csv(
        os.path.join(output_dir, 'experiment.csv'))
    # Rename columns and key experiment table based on experiment name and dataset id
    experiment_df.names = {'name': 'experiment_id'}
    experiment_df = experiment_df[:, ['id', 'experiment_id', 'dataset_id']]
    experiment_df.key = ('dataset_id', 'experiment_id')
    join_dfs['experiment'] = experiment_df
    # Nearly the same code as in load_join_write but has special case handling:
    # these tables join on the composite (dataset_id, experiment_id) key and
    # must not keep the helper dataset_id column in the output.
    for df_name in ['dose_response', 'profile']:
        df = load_table(df_name, data_dir)
        for fk in ['dataset', 'experiment']:
            df = join_tables(df, join_dfs[fk], fk+'_id')
        del df[:, 'dataset_id']
        write_table(df, df_name, output_dir,
                    add_index=(df_name == 'dose_response'))
    return join_dfs
def load_join_write(name, data_dir, output_dir, foreign_keys=None, join_dfs=None, add_index=True):
    """
    Given the name of a table, load all PSet tables of that name from data_dir,
    join them to any foreign key tables (specified by foreign_keys), and write
    the final combined and joined table to output_dir as a CSV.

    @param name: [`string`] The name of the table
    @param data_dir: [`string`] File path to the directory with all PSet tables
    @param output_dir: [`string`] The file path to the final tables
    @param foreign_keys: [`list(string)`] An optional list of tables that this table
        needs to be joined with
    @param join_dfs: [`dict(string: datatable.Frame)`] An optional dictionary of join
        tables (for building out foreign keys); keys are table names
    @param add_index: [`bool`] Indicates whether or not to add a primary key (1-nrows)
        when writing the final table to a .csv
    @return: [`datatable.Frame`] The final combined and joined table
    @raise TypeError: if foreign_keys are given without join_dfs
    """
    # BUG FIX: avoid the mutable default argument `foreign_keys=[]`;
    # None is normalized to a fresh empty list on every call.
    if foreign_keys is None:
        foreign_keys = []
    df = load_table(name, data_dir)
    if foreign_keys and join_dfs is None:
        raise TypeError(f'The {name} table has foreign keys {foreign_keys} '
                        'but you have not passed any join tables.')
    for fk in foreign_keys:
        df = join_tables(df, join_dfs[fk], fk + '_id')
    df = write_table(df, name, output_dir, add_index)
    return df
def load_table(name, data_dir):
    """
    Load all PSet tables with name into a datatable, dropping any duplicate rows.

    @param name: [`string`] The name of the table
    @param data_dir: [`string`] File path to the directory with all PSet tables
    @return: [`datatable.Frame`] A datatable containing all rows from all PSets
    """
    # Get all files matching {data_dir}/**/*{name}.csv
    files = glob.glob(os.path.join(data_dir, '**', f'*{name}.csv'))
    # Keep only paths of the exact form {data_dir}/{pset}/{pset}_{name}.csv.
    # BUG FIX: re.escape guards against regex metacharacters in the path and
    # table name (the old pattern interpolated them raw, and '.csv' matched
    # any character before 'csv'), and os.sep keeps the pattern correct on
    # platforms whose path separator is not '/'.
    sep = re.escape(os.sep)
    pattern = re.compile(
        re.escape(data_dir) + sep + r'(\w+)' + sep + r'\1_' +
        re.escape(name) + r'\.csv$')
    files = [file_name for file_name in files if pattern.search(file_name)]
    # Read and concatenate the per-PSet tables
    df = rbind(*iread(files, sep=','))
    # Replace any empty strings with None/NA
    df.replace("", None)
    # Drop duplicates
    # (groups by all columns and selects only the first row from each group)
    df = df[0, :, by(df.names)]
    return df
def rename_and_key(df, join_col, og_col='name'):
    """
    Prepare df for joins with other tables: rename the column it will be
    joined on and key the frame on that column.

    @param df: [`datatable.Frame`] The table to be keyed
    @param join_col: [`string`] The join column's name in the other tables
        (e.g. 'tissue_id', 'cell_id')
    @param og_col: [`string`] The join column's current name in this table
    @return: [`datatable.Frame`] The renamed and keyed table
    """
    # The primary key must carry the foreign-key name for datatable joins to work.
    df.names = {og_col: join_col}
    # Keep only the id and the join column, then key on the join column.
    keyed = df[:, ['id', join_col]]
    keyed.key = join_col
    return keyed
def join_tables(df1, df2, join_col):
    """
    Left-join df2 onto df1 using join_col.

    @param df1: [`datatable.Frame`] The datatable holding the foreign key
    @param df2: [`datatable.Frame`] The keyed join table (e.g. the tissue table)
    @param join_col: [`string`] The column the join is performed on (e.g. 'tissue_id')
    @return [`datatable.Frame`] The joined table, or None if join_col is absent
        from either frame
    """
    if join_col not in df1.names or join_col not in df2.names:
        print(f'{join_col} is missing from one or both of the datatables passed!',
              'Make sure you have prepared df2 using rename_and_key().')
        return None
    joined = df1[:, :, join(df2)]
    # Swap in the id from the join table: the old join column becomes
    # disposable and the joined 'id' takes over the join column's name.
    joined.names = {join_col: 'drop', 'id': join_col}
    del joined[:, 'drop']
    return joined
def write_table(df, name, output_dir, add_index=True):
    """
    Optionally prepend a primary key ('id') to df and write it to output_dir
    as a .csv file.

    @param df: [`datatable.Frame`] A PharmacoDB table
    @param name: [`string`] The table's name (used for the file name)
    @param output_dir: [`string`] The directory the CSV is written to
    @param add_index: [`bool`] Whether to prepend a 1-based 'id' column
    @return: [`datatable.Frame`] The (possibly indexed) PharmacoDB table
    """
    print(f'Writing {name} table to {output_dir}...')
    if add_index:
        # 1-based primary key column
        index_col = dt.Frame(id=np.arange(df.nrows) + 1)
        df = cbind(index_col, df)
    df.to_csv(os.path.join(output_dir, f'{name}.csv'))
    return df
| {"/PharmacoDI/PharmacoDI/__init__.py": ["/PharmacoDI/PharmacoDI/download_psets.py", "/PharmacoDI/PharmacoDI/download_canonical_psets.py", "/PharmacoDI/PharmacoDI/download_gene_signatures.py", "/PharmacoDI/PharmacoDI/get_gene_targets.py", "/PharmacoDI/PharmacoDI/get_target_annotations.py", "/PharmacoDI/PharmacoDI/read_pset.py", "/PharmacoDI/PharmacoDI/build_all_pset_tables.py", "/PharmacoDI/PharmacoDI/get_chembl_targets.py", "/PharmacoDI/PharmacoDI/get_chembl_drug_targets.py", "/PharmacoDI/PharmacoDI/build_target_tables.py", "/PharmacoDI/PharmacoDI/combine_pset_tables.py", "/PharmacoDI/PharmacoDI/build_synonym_tables.py", "/PharmacoDI/PharmacoDI/build_cellosaurus.py", "/PharmacoDI/PharmacoDI/build_clinical_trial_tables.py"]} |
45,836 | bhklab/DataIngestion | refs/heads/master | /PharmacoDI/PharmacoDI/read_pset.py | import glob
import os
import re
import numpy as np
import pandas as pd
import swifter # Library to parallelize apply statements automagically
# NOTE(review): these module-level values look like leftover debugging/example
# inputs for read_pset below; none of the functions in this module reference
# them -- confirm they are unused elsewhere before removing.
pset_name = 'GDSC_v1'
file_path = os.path.join('data', 'rawdata')
slot_names = ['curation', 'drug', 'molecularProfiles',
              'sensitivity', 'annotation', 'cell']
def read_pset(pset_name, file_path, slot_names=None):
    """
    Read in all the data associated with a PharmacoSet from the .csv files
    exported by the writeToCsv method from rPharmacoDI.

    @param pset_name: [`string`] Name of the PharmacoSet object as it appears in the
        directory name for files exported using rPharmacoDI's writeToCsv function.
    @param file_path: [`string`] Path to the directory where PharmacoSet data is stored.
    @param slot_names: [`list`] PharmacoSet slots to read in. Defaults to all slots.
        (NOTE(review): this parameter is not referenced in the function body; the
        function reads every file in the PSet directory -- confirm intent.)
    @return: [`DataFrame`] A DataFrame with the columns slot (the slot data is from),
        one or more subitem columns (the subitem within a slot), and data, which
        contains the object read for that slot/subitem.
    @raise ValueError: if no matching PSet directory exists.
    """
    # Avoid a mutable default argument; None means "all slots".
    if slot_names is None:
        slot_names = ['curation', 'drug', 'molecularProfiles',
                      'sensitivity', 'annotation', 'cell']
    # BUG FIX: glob.glob returns a (possibly empty) list; indexing [0]
    # directly raised IndexError before the old `is None` check could ever
    # fire. Check for an empty result explicitly instead.
    pset_dirs = glob.glob(f'{os.path.join(file_path, pset_name)}_PSet')
    if not pset_dirs:
        raise ValueError(
            f'No PSet directory named {pset_name} could be found in {file_path}')
    pset_dir = pset_dirs[0]
    # List all files for the selected PSet, then split on $ to make a DataFrame
    pset_files = pd.Series(os.listdir(pset_dir))
    pset_files_df = pset_files.str.split('$', expand=True)
    # Build the file paths to read in data for each row of the DataFrame
    pset_files_df['file_paths'] = [os.path.join(
        pset_dir, file_name) for file_name in pset_files]
    # Rename columns
    pset_files_df.columns = [
        'slot', *[f'subitems{i}' for i in range(1, pset_files_df.shape[1] - 1)], 'file_paths']
    # Read in PSet data (swifter parallelizes the per-row apply)
    pset_files_df['data'] = pset_files_df['file_paths'].swifter.apply(
        read_pset_file)
    # Drop file_paths column in place
    pset_files_df.drop('file_paths', axis='columns', inplace=True)
    # Strip the slot prefix and file extensions from the id columns.
    # NOTE(review): str.replace here relies on the pattern being treated as a
    # regex, which newer pandas only does with regex=True -- confirm the
    # pinned pandas version before upgrading.
    pset_files_df.iloc[:, 0:-1] = pset_files_df.iloc[:, 0:-
                                                     1].apply(lambda col: col.str.replace('.*@|.csv.gz$|.txt', ''))
    return pset_files_df
# ---- Helper methods for read_csv
def read_pset_file(file_path):
    """
    Read a single exported PSet file.

    @param file_path: [`string`] Path to a '.csv.gz' or '.txt' file.
    @return: [`DataFrame`] for CSV data, or [`list(string)`] of lines
        (newlines preserved) for text files.
    @raise ValueError: if the file extension is not supported.
    """
    # BUG FIX: match on the suffix rather than a substring, so e.g. a
    # '.csv.gz' appearing in the middle of a file name cannot misclassify it.
    if file_path.endswith('.csv.gz'):
        return pd.read_csv(file_path)
    elif file_path.endswith('.txt'):
        with open(file_path) as f:
            return f.readlines()
    else:
        raise ValueError(
            f'Unsupported file type passed to this function from: {file_path}')
def pset_df_to_nested_dict(df):
    """
    Recursively turn unique values in the first column of a DataFrame into
    dictionary keys until only one column remains.

    @param df: [`DataFrame`] With one or more ID columns before a final 'data' column.
    @return: [`dict`] A nested dict with as many levels as the original DataFrame
        has columns, minus one; leaves are the objects stored in the 'data' column.
    """
    # Only the first unique value is ever inspected: the loop returns on its
    # first iteration, so it acts as an "is there a non-None key?" check.
    for key_check in pd.unique(df[df.columns[0]]):
        if key_check is not None:
            # Recursively nest the first column as key and remaining columns as values.
            # NOTE(review): the recursion-depth test uses df[1:].shape[1] (a ROW
            # slice), which has the same column count as df itself -- presumably
            # df.shape[1] was intended; behavior is the same either way.
            return {key: pset_df_to_nested_dict(df.loc[df[df.columns[0]] == key, df.columns[1:]]) if
                    df[1:].shape[1] > 2 else df.loc[df[df.columns[0]] == key, 'data'].values[0] for
                    key in pd.unique(df[df.columns[0]])}
        else:
            # No key at this level: return the data object directly.
            return df.iloc[:, -1].values[0]
| {"/PharmacoDI/PharmacoDI/__init__.py": ["/PharmacoDI/PharmacoDI/download_psets.py", "/PharmacoDI/PharmacoDI/download_canonical_psets.py", "/PharmacoDI/PharmacoDI/download_gene_signatures.py", "/PharmacoDI/PharmacoDI/get_gene_targets.py", "/PharmacoDI/PharmacoDI/get_target_annotations.py", "/PharmacoDI/PharmacoDI/read_pset.py", "/PharmacoDI/PharmacoDI/build_all_pset_tables.py", "/PharmacoDI/PharmacoDI/get_chembl_targets.py", "/PharmacoDI/PharmacoDI/get_chembl_drug_targets.py", "/PharmacoDI/PharmacoDI/build_target_tables.py", "/PharmacoDI/PharmacoDI/combine_pset_tables.py", "/PharmacoDI/PharmacoDI/build_synonym_tables.py", "/PharmacoDI/PharmacoDI/build_cellosaurus.py", "/PharmacoDI/PharmacoDI/build_clinical_trial_tables.py"]} |
45,837 | bhklab/DataIngestion | refs/heads/master | /PharmacoDI/PharmacoDI/get_target_annotations.py | import os
#import PharmacoDI as di
import pandas as pd
import numpy as np
import urllib
# Explicit submodule imports: `import urllib` alone does not make
# urllib.parse / urllib.request available; the code below only worked
# because `requests` happened to import them first.
import urllib.parse
import urllib.request
import requests
from io import StringIO
from lxml import etree
def get_target_annotations(pset, annot_dir):
    """
    Annotate the 'TARGET' genes of a PSet using the DrugBank drug-target mapping.

    :param pset: A PSet object exposing gene metadata via
        pset.get("molecularProfiles").get("Kallisto_0.46.1.rnaseq").get("elementMetadata").
    :param annot_dir: [`string`] Directory containing 'drugbank_drug_targets_all.csv';
        the gene-to-drug mapping CSV is written here if not already present.
    :return: None
    """
    # Read in drug target annotations and gene annotations
    drug_targets = pd.read_csv(os.path.join(annot_dir, 'drugbank_drug_targets_all.csv'))
    rnaseq_df = pset.get("molecularProfiles").get("Kallisto_0.46.1.rnaseq").get("elementMetadata")
    # Map genes to DrugBank drug ids via the shared gene name
    genes_to_drugs = pd.merge(drug_targets.loc[:, ['Name', 'Gene Name', 'Drug IDs']],
                              rnaseq_df.loc[:, ['gene_name', 'gene_id']],
                              left_on='Gene Name', right_on='gene_name')
    # Expand the '; '-separated drug id lists into one row per (gene, drug) pair
    genes_to_drugs['Drug IDs'] = [str.split(ids, '; ') for ids in genes_to_drugs['Drug IDs'].values]
    genes_to_drugs = genes_to_drugs.explode('Drug IDs')
    # Write to disk if not already present.
    # BUG FIX: the original called os.isfile (AttributeError: no such function)
    # and the non-existent pd.write_csv; use os.path.isfile and DataFrame.to_csv.
    file_path = os.path.join(annot_dir, 'drugbank_drug_to_gene_mappings.csv')
    if not os.path.isfile(file_path):
        genes_to_drugs.to_csv(file_path, index=False)
def query_uniprot_mapping_api(ids, convert_from="ACC+ID", to="ENSEMBL_ID"):
    """
    Query the UniProt mapping API to convert between gene/protein identifiers.

    Defaults to converting UniProt AC or ID into an Ensembl gene id. The API also
    supports a wide range of other conversions, such as 'GENENAME', 'EMBL' and
    'P_ENTREZGENEID'.

    Unmatched ids fail silently and are excluded from the resulting DataFrame; they
    can be retrieved by redoing the query manually at https://www.uniprot.org/uploadlists/.
    Documentation for other potential conversions is available at:
    https://www.uniprot.org/help/api_idmapping

    :param ids: [`list`, `tuple` or `ndarray`] Iterable of gene/protein identifier strings.
    :param convert_from: [`string`] UniProt abbreviation of the source id format.
        Defaults to 'ACC+ID'.
    :param to: [`string`] UniProt abbreviation of the target id format. Defaults
        to 'ENSEMBL_ID'.
    :return: [`DataFrame`] With columns 'From' and 'To', mapping each input id to
        the requested id type based on annotations in the UniProt database.
    """
    # API URL
    endpoint = 'https://www.uniprot.org/uploadlists/'
    # The API expects one space-separated string of ids.
    params = {
        'from': convert_from,
        'to': to,
        'format': 'tab',
        'query': ' '.join(str(identifier) for identifier in ids),
    }
    # Encode the query and POST it
    payload = urllib.parse.urlencode(params).encode('utf-8')
    request_obj = urllib.request.Request(endpoint, payload)
    with urllib.request.urlopen(request_obj) as handle:
        raw_response = handle.read()
    # Decode the tab-separated response into a DataFrame
    table_data = StringIO(raw_response.decode("utf-8"))
    return pd.read_table(table_data, sep="\t")
| {"/PharmacoDI/PharmacoDI/__init__.py": ["/PharmacoDI/PharmacoDI/download_psets.py", "/PharmacoDI/PharmacoDI/download_canonical_psets.py", "/PharmacoDI/PharmacoDI/download_gene_signatures.py", "/PharmacoDI/PharmacoDI/get_gene_targets.py", "/PharmacoDI/PharmacoDI/get_target_annotations.py", "/PharmacoDI/PharmacoDI/read_pset.py", "/PharmacoDI/PharmacoDI/build_all_pset_tables.py", "/PharmacoDI/PharmacoDI/get_chembl_targets.py", "/PharmacoDI/PharmacoDI/get_chembl_drug_targets.py", "/PharmacoDI/PharmacoDI/build_target_tables.py", "/PharmacoDI/PharmacoDI/combine_pset_tables.py", "/PharmacoDI/PharmacoDI/build_synonym_tables.py", "/PharmacoDI/PharmacoDI/build_cellosaurus.py", "/PharmacoDI/PharmacoDI/build_clinical_trial_tables.py"]} |
45,838 | bhklab/DataIngestion | refs/heads/master | /PharmacoDI/PharmacoDI/build_experiment_tables.py | import pandas as pd
import numpy as np
from PharmacoDI.build_primary_pset_tables import build_cell_df
def build_experiment_tables(pset_dict, pset_name, cell_df=None):
    """
    Build the experiment, dose response and profile tables for a PSet and
    return them in a dictionary, with table names as the keys.

    @param pset_dict: [`dict`] A nested dictionary containing all tables in the PSet
    @param pset_name: [`string`] The name of the PSet
    @param cell_df: [`pd.DataFrame`] A table of all the cells in the PSet and their tissues
    @return: [`dict`] A dictionary of experiment-related tables
    """
    return {
        'experiment': build_experiment_df(pset_dict, pset_name, cell_df),
        'dose_response': build_dose_response_df(pset_dict, pset_name),
        'profile': build_profile_df(pset_dict, pset_name),
    }
def build_experiment_df(pset_dict, pset_name, cell_df=None):
    """
    Build a table with all the experiments of a PSet.

    @param pset_dict: [`dict`] A nested dictionary containing all tables in the PSet
    @param pset_name: [`string`] The name of the PSet
    @param cell_df: [`pd.DataFrame`] A table of all the cells in the PSet and their tissues
    @return: [`pd.DataFrame`] A table containing all experiments in the dataset
    """
    # Build cell_df only when the caller did not supply one.
    # (DataFrame truthiness is ambiguous, so test for None explicitly.)
    if cell_df is None:
        cell_df = build_cell_df(pset_dict)
    # Pull the relevant sensitivity-info columns and give them their
    # PharmacoDB names.
    experiment_df = (
        pset_dict['sensitivity']['info']
        .loc[:, ['.rownames', 'cellid', 'drugid']]
        .copy()
        .rename(columns={'.rownames': 'experiment_id',
                         'cellid': 'cell_id',
                         'drugid': 'drug_id'})
    )
    # Tag every experiment with the dataset it came from
    experiment_df['dataset_id'] = pset_name
    # Attach tissue_id by looking each cell up in cell_df
    experiment_df = pd.merge(experiment_df, cell_df[['name', 'tissue_id']],
                             left_on='cell_id', right_on='name', how='left')
    # Keep only the PharmacoDB columns; 'experiment_id' is exposed as 'name'
    experiment_df = experiment_df[
        ['experiment_id', 'cell_id', 'drug_id', 'dataset_id', 'tissue_id']]
    return experiment_df.rename(columns={'experiment_id': 'name'})
# TODO:: Do Python functions pass by reference or copy by default?
# - Python neither; it is 'pass-by-object-reference'
# - When you assign a variable in Python, it creates a name in the namespace; that name contains
# a reference to the value of that variable (You can use the id() function to see the memory address of an object)
# - When you pass a variable to a function in Python, it creates a new variable, which is a copy of the
# original variable. There are now two DIFFERENT objects, which contain the memory address of the same value
# - Therefore, if you modify that variable BY REFERENCE within the function, then it will modify the actual value;
# the original variable will then also be modified (because it points to the same memory location)
# - However, if you reassign the variable name within the function scope, it DOES NOT affect the original variable;
# instead it modifies the copy of the variable with a reference to a different memory location (the new value)
# - Outside of the function, the original variable still holds a reference to the old memory location; therefore
# the value of that object is not changed
def build_dose_response_df(pset_dict, pset_name):
    """
    Build a table that, for each experiment in a dataset, lists the drug that was
    tested, the doses in which that drug was administered, and the viability
    responses corresponding to all the doses.

    @param pset_dict: [`dict`] A nested dictionary containing all tables in the PSet
    @param pset_name: [`string`] The name of the PSet
    @return: [`pd.DataFrame`] A table with all the dose-response mappings for
        each experiment
    """
    # Get dose and response info from the pset
    dose = pset_dict['sensitivity']['raw.Dose']
    response = pset_dict['sensitivity']['raw.Viability']
    # Rename the dose columns to plain numbers so they can be coerced to int
    # after melting.
    # BUG FIX: the rename range must span the number of dose COLUMNS
    # (shape[1]); the original used shape[0] (rows), which left dose columns
    # unrenamed whenever there were fewer experiments than doses and made the
    # astype('int') below fail.
    dose_pattern = dose.columns[1][:-1]
    rename_dict = {f'{dose_pattern}{n}': str(n)
                   for n in np.arange(1, dose.shape[1])}
    dose.rename(columns=rename_dict, inplace=True)
    response.rename(columns=rename_dict, inplace=True)
    # Reshape the DataFrames using melt to go from 'wide' to 'long'
    dose = dose.melt(id_vars='.exp_id', value_name='dose',
                     var_name='dose_id').dropna()
    dose['dose_id'] = dose.dose_id.astype('int')
    response = response.melt(
        id_vars='.exp_id', value_name='response', var_name='dose_id').dropna()
    response['dose_id'] = response.dose_id.astype('int')
    # Set indices for faster joins (~3x)
    dose.set_index(['.exp_id', 'dose_id'], inplace=True)
    response.set_index(['.exp_id', 'dose_id'], inplace=True)
    # Join dose and response on (experiment, dose number)
    dose_response_df = pd.merge(
        dose, response, left_index=True, right_index=True).reset_index()
    dose_response_df.rename(columns={'.exp_id': 'experiment_id'}, inplace=True)
    dose_response_df.drop(columns=['dose_id'], inplace=True)
    # Add dataset_id for joins
    dose_response_df['dataset_id'] = pset_name
    return dose_response_df
def build_profile_df(pset_dict, pset_name):
    """
    Build the profile table, containing the sensitivity statistics for every
    experiment in the PSet.

    @param pset_dict: [`dict`] A nested dictionary containing all tables in the PSet
    @param pset_name: [`string`] The name of the PSet
    @return: [`pd.DataFrame`] A table containing all statistics for each
        profile in the PSet
    """
    profiles = pset_dict['sensitivity']['profiles']
    # Column naming differs between PSets: some carry raw 'E_inf'/'EC50'/'HS'
    # columns, others carry recomputed 'einf'/'ec50'/'slope_recomputed'.
    if 'E_inf' in profiles.columns:
        keep = ['.rownames', 'aac_recomputed',
                'ic50_recomputed', 'HS', 'E_inf', 'EC50']
        renames = {'.rownames': 'experiment_id', 'aac_recomputed': 'AAC',
                   'ic50_recomputed': 'IC50', 'E_inf': 'Einf'}
    else:
        keep = ['.rownames', 'aac_recomputed', 'ic50_recomputed',
                'slope_recomputed', 'einf', 'ec50']
        renames = {'.rownames': 'experiment_id', 'aac_recomputed': 'AAC',
                   'slope_recomputed': 'HS', 'ic50_recomputed': 'IC50',
                   'einf': 'Einf', 'ec50': 'EC50'}
    profile_df = profiles[keep].copy()
    profile_df.rename(columns=renames, inplace=True)
    # DSS columns are placeholders until these statistics are computed upstream
    for dss_col in ('DSS1', 'DSS2', 'DSS3'):
        profile_df[dss_col] = np.nan
    # Add dataset_id for joins
    profile_df['dataset_id'] = pset_name
    return profile_df[['experiment_id', 'HS', 'Einf', 'EC50', 'AAC',
                       'IC50', 'DSS1', 'DSS2', 'DSS3', 'dataset_id']]
| {"/PharmacoDI/PharmacoDI/__init__.py": ["/PharmacoDI/PharmacoDI/download_psets.py", "/PharmacoDI/PharmacoDI/download_canonical_psets.py", "/PharmacoDI/PharmacoDI/download_gene_signatures.py", "/PharmacoDI/PharmacoDI/get_gene_targets.py", "/PharmacoDI/PharmacoDI/get_target_annotations.py", "/PharmacoDI/PharmacoDI/read_pset.py", "/PharmacoDI/PharmacoDI/build_all_pset_tables.py", "/PharmacoDI/PharmacoDI/get_chembl_targets.py", "/PharmacoDI/PharmacoDI/get_chembl_drug_targets.py", "/PharmacoDI/PharmacoDI/build_target_tables.py", "/PharmacoDI/PharmacoDI/combine_pset_tables.py", "/PharmacoDI/PharmacoDI/build_synonym_tables.py", "/PharmacoDI/PharmacoDI/build_cellosaurus.py", "/PharmacoDI/PharmacoDI/build_clinical_trial_tables.py"]} |
45,842 | lucasjt93/Nutrilator | refs/heads/main | /Nutrilator/calculator.py | from flask import (
Blueprint, flash, g, redirect, render_template, request, url_for
)
from Nutrilator.auth import login_required
from Nutrilator.db import get_db
from datetime import datetime
# Blueprint for the macro-calculator views; registered in create_app().
bp = Blueprint('calculator', __name__)
@bp.route('/calculator', methods=('GET', 'POST'))
@login_required
def calculator():
    """Render the macro calculator and handle calculation submissions.

    GET: show the calculator form, pre-filled with the user's stored data.
    POST: validate the form, compute REE/TDEE and the macro split, persist
    the results, and redirect to the index page.
    """
    # get username + message
    username = g.user['username']
    messages = [f'Hi {username}, calculate your macros!', f'Hi {username}, update your progress!']
    timestamp = str(datetime.now())
    if request.method == 'POST':
        db = get_db()
        # define required fields
        required = {
            'age': int(request.form['age']),
            'gender': request.form['gender'],
            'weight': int(request.form['weight']),
            'height': int(request.form['height']),
            'activity': request.form['activity'],
            'goal': request.form['goal']
        }
        # handles none in required at serve-side (its also checked at client-side)
        # NOTE(review): `if not v` also rejects a legitimate value of 0, and the
        # int() casts above raise ValueError on non-numeric input before this
        # loop runs -- confirm that is intended. The print(k) looks like a
        # debug leftover.
        for k, v in required.items():
            if not v:
                print(k)
                flash(f'Missing required field {k}', category="error")
                return render_template('calculator/calculator.html', messages=messages), 403
        # resting energy expenditure (REE)
        if required['gender'] == 'male':
            # male
            REE = 10 * int(required['weight']) + 6.25 * int(required['height']) - 5 * int(required['age']) + 5
        else:
            # female
            REE = 10 * int(required['weight']) + 6.25 * int(required['height']) - 5 * int(required['age']) - 161
        # total daily energy expenditure (TDEE): REE scaled by activity level
        if required['activity'] == 'Sedentary':
            TDEE = REE * 1.2
        elif required['activity'] == 'Light - 3 times per week':
            TDEE = REE * 1.375
        elif required['activity'] == 'Mid - 5 times per week':
            TDEE = REE * 1.55
        else:
            TDEE = REE * 1.725
        # kcal per goal: +/-20% of TDEE for weight gain/loss
        if required['goal'] == 'Lose weight':
            TDEE = TDEE - (TDEE * 0.20)
        elif required['goal'] == 'Gain weight':
            TDEE = TDEE + (TDEE * 0.20)
        # save user data into db
        # NOTE(review): parameters are passed as varargs, not a tuple, so
        # get_db() is presumably a wrapper rather than a raw sqlite3
        # connection -- confirm against Nutrilator/db.py.
        db.execute(
            'INSERT INTO users_data VALUES (?, ?, ?, ?, ?, ?, ?)',
            g.user['id'],
            required['age'],
            required['gender'],
            required['weight'],
            required['height'],
            required['goal'],
            timestamp
        )
        # calculate initial macros per kcal:
        # protein = 0.825 g per unit of weight/0.453592 (kg -> lb conversion),
        # fat = 30% of calories at 9 kcal/g, carbs = remaining calories at 4 kcal/g
        protein = 0.825 * (required['weight']/0.453592)
        fat = (TDEE * 0.3) / 9
        carbo = (TDEE - (protein * 4) - (fat * 9)) / 4
        # save macros into db (insert on first calculation, update afterwards)
        # NOTE(review): g.macros is presumably populated by a before_request
        # hook elsewhere (not visible here) -- confirm.
        if not g.macros:
            db.execute(
                'INSERT INTO macros VALUES (?, ?, ?, ?, ?)',
                g.user['id'],
                TDEE,
                protein,
                carbo,
                fat
            )
        else:
            db.execute(
                'UPDATE macros SET tdee = ?, protein = ?, carbo = ?, fat = ? WHERE user_id = ?',
                TDEE,
                protein,
                carbo,
                fat,
                g.user['id']
            )
        return redirect(url_for('index'))
    # User data to determine template
    user_data = get_db().execute(
        'SELECT * FROM users_data WHERE user_id = ?', g.user['id']
    )
    if user_data:
        user_data = user_data[0]
    return render_template('calculator/calculator.html', messages=messages, user_data=user_data)
| {"/Nutrilator/calculator.py": ["/Nutrilator/auth.py", "/Nutrilator/db.py"], "/Nutrilator/foodtracker.py": ["/Nutrilator/auth.py", "/Nutrilator/db.py"], "/Nutrilator/__init__.py": ["/Nutrilator/db.py", "/Nutrilator/auth.py"], "/Nutrilator/auth.py": ["/Nutrilator/db.py"]} |
45,843 | lucasjt93/Nutrilator | refs/heads/main | /instance/config.py | import os
# General config
# Values come from environment variables so deployments can override them;
# this file is loaded by app.config.from_pyfile('config.py') in create_app().
DATABASE = os.getenv("DATABASE_URL")
SECRET_KEY = os.getenv("SECRET_KEY")
FLASK_ENV = os.getenv("FLASK_ENV")
| {"/Nutrilator/calculator.py": ["/Nutrilator/auth.py", "/Nutrilator/db.py"], "/Nutrilator/foodtracker.py": ["/Nutrilator/auth.py", "/Nutrilator/db.py"], "/Nutrilator/__init__.py": ["/Nutrilator/db.py", "/Nutrilator/auth.py"], "/Nutrilator/auth.py": ["/Nutrilator/db.py"]} |
45,844 | lucasjt93/Nutrilator | refs/heads/main | /Nutrilator/foodtracker.py | from flask import (
Blueprint, flash, jsonify, json, g, redirect, render_template, request, url_for
)
import os
import requests
from Nutrilator.auth import login_required
from Nutrilator.db import get_db
from datetime import date
# Blueprint for the food-tracker views; registered in create_app().
bp = Blueprint('foodtracker', __name__)
@bp.route('/foodtracker', methods=('GET', 'POST'))
@login_required
def foodtracker():
    """Render the food tracker and log submitted foods for the current user.

    GET: show the tracker page.
    POST: read the selected food's macros from the form, default quantity to 1,
    insert a row into food_logs stamped with today's date, and redirect home.
    """
    if request.method == 'POST':
        db = get_db()
        # Retrieve form data from request
        try:
            food_data = {
                'food_name': str(request.form['name']),
                'food_weight': float(request.form['weight']),
                'food_kcal': float(request.form['kcal']),
                'food_carbs': float(request.form['carbs']),
                'food_protein': float(request.form['protein']),
                'food_fat': float(request.form['fat']),
                'quantity': request.form['quantity']
            }
        # if no data selected
        # NOTE(review): the bare except also hides float() conversion errors,
        # not just missing fields -- consider narrowing to (KeyError, ValueError).
        except:
            flash('Select a food from the database', category='error')
            return render_template('foodtracker/foodtracker.html')
        # If quantity is not submitted
        if not food_data['quantity']:
            food_data['quantity'] = 1
        # Retrieve date from datetime api
        today = date.today()
        # Save into db
        # NOTE(review): parameters passed as varargs -- get_db() is presumably
        # a wrapper around sqlite3; confirm against Nutrilator/db.py.
        db.execute(
            'INSERT INTO food_logs VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)',
            g.user['id'],
            today,
            food_data['food_name'],
            food_data['food_weight'],
            food_data['food_kcal'],
            food_data['food_carbs'],
            food_data['food_protein'],
            food_data['food_fat'],
            food_data['quantity']
        )
        return redirect(url_for('index'))
    return render_template('foodtracker/foodtracker.html')
@bp.route('/foodtracker/search', methods=['POST'])
def search():
    """Look up a food in the Nutritionix natural-language nutrients API.

    The raw request body is the food description. Returns JSON with the first
    match's name and macros, or JSON null when the request or parsing fails.
    """
    if request.method == 'POST':
        query_text = request.get_data().decode()
        # Contact the API
        try:
            api_url = "https://trackapi.nutritionix.com/v2/natural/nutrients"
            request_headers = {
                "x-app-id": os.environ.get("APP_ID"),
                "x-app-key": os.environ.get("API_KEY"),
                "Content-Type": "application/json"
            }
            api_response = requests.post(
                api_url,
                headers=request_headers,
                data=json.dumps({"query": query_text})
            )
        except requests.RequestException:
            return jsonify(None)
        # Parse the response; only the first food in the list is used
        try:
            first_food = api_response.json().get('foods')[0]
            return jsonify({
                "name": first_food.get('food_name'),
                "weight": first_food.get('serving_weight_grams'),
                "kcal": first_food.get('nf_calories'),
                "carbs": first_food.get('nf_total_carbohydrate'),
                "protein": first_food.get('nf_protein'),
                "fat": first_food.get('nf_total_fat')
            })
        # Handle foods not found in the Nutritionix api
        except (KeyError, TypeError, ValueError):
            return jsonify(None)
| {"/Nutrilator/calculator.py": ["/Nutrilator/auth.py", "/Nutrilator/db.py"], "/Nutrilator/foodtracker.py": ["/Nutrilator/auth.py", "/Nutrilator/db.py"], "/Nutrilator/__init__.py": ["/Nutrilator/db.py", "/Nutrilator/auth.py"], "/Nutrilator/auth.py": ["/Nutrilator/db.py"]} |
45,845 | lucasjt93/Nutrilator | refs/heads/main | /Nutrilator/__init__.py | import os
from flask import Flask, render_template, g
from Nutrilator.db import get_db
from Nutrilator.auth import login_required
def create_app(test_config=None):
    """Application factory: build and configure the Nutrilator Flask app.

    @param test_config: optional mapping that overrides the instance config
        (used when testing).
    @return: the configured Flask application.
    """
    # create and configure the app
    app = Flask(__name__, instance_relative_config=True)
    app.config.from_mapping(
        # 'dev' is a fallback only; instance/config.py overrides it when present
        SECRET_KEY='dev',
        DATABASE=os.path.join(app.instance_path, 'nutrilator.sqlite'),
    )
    if test_config is None:
        # load the instance config, if it exists, when not testing
        app.config.from_pyfile('config.py', silent=True)
    else:
        # Load the test config if passed in
        app.config.from_mapping(test_config)
    # initiate db CLI
    from . import db
    db.init_app(app)
    # Index view
    @app.route("/")
    def index():
        """Home page: dashboard with latest user data and a weight chart."""
        # NOTE(review): g.user is presumably set by a before_request hook in
        # auth.py (not visible here); anonymous visitors get the bare template.
        if g.user:
            # Get user data (most recent entry only)
            user_data = get_db().execute(
                'SELECT * FROM users_data WHERE user_id = ? ORDER BY date desc LIMIT 1', g.user['id']
            )
            if user_data:
                user_data = user_data[0]
            # Get weight data (full history, for the progress chart)
            weight_data = get_db().execute(
                'SELECT weight, date FROM users_data WHERE user_id = ?', g.user['id']
            )
            # Data set for chartjs: parallel label (date, YYYY-MM-DD) and
            # value (weight) lists
            weight_data = [dict(row) for row in weight_data]
            labels = [weight_data[n]['date'][:10] for n in range(len(weight_data))]
            data = [weight_data[n]['weight'] for n in range(len(weight_data))]
            return render_template(
                'index.html',
                user_data=user_data,
                labels=labels,
                data=data
            )
        else:
            return render_template('index.html')
    # Auth bp
    from . import auth
    app.register_blueprint(auth.bp)
    # Calculator bp
    from . import calculator
    app.register_blueprint(calculator.bp)
    # Food tracker bp
    from . import foodtracker
    app.register_blueprint(foodtracker.bp)
    # Food log
    @app.route("/foodlog")
    @login_required
    def foodlog():
        """Show the logged-in user's full food history, newest first."""
        # Retrieve history of all his foods
        user_log = get_db().execute(
            'SELECT * FROM food_logs WHERE user_id = ? ORDER BY date DESC', g.user['id']
        )
        return render_template('foodlog/foodlog.html', user_log=user_log)
    return app
# For WSGI in Procfile
nutrilator = create_app()
| {"/Nutrilator/calculator.py": ["/Nutrilator/auth.py", "/Nutrilator/db.py"], "/Nutrilator/foodtracker.py": ["/Nutrilator/auth.py", "/Nutrilator/db.py"], "/Nutrilator/__init__.py": ["/Nutrilator/db.py", "/Nutrilator/auth.py"], "/Nutrilator/auth.py": ["/Nutrilator/db.py"]} |
45,846 | lucasjt93/Nutrilator | refs/heads/main | /Nutrilator/auth.py | import functools
import os
from flask import (
Blueprint, flash, g, redirect, render_template, request, session, url_for
)
from werkzeug.security import check_password_hash, generate_password_hash
from Nutrilator.db import get_db
from datetime import date
bp = Blueprint('auth', __name__, url_prefix='/auth')
# Get flask env from env variable
env = os.getenv("FLASK_ENV")
@bp.route('/register', methods=('GET', 'POST'))
def register():
    """Sign-up view: validate the form, create the user, redirect to login."""
    if request.method == 'POST':
        username = request.form['username']
        password = request.form['password']
        error = None
        # Check if user exists
        # NOTE(review): positional SQL params match the cs50.SQL API; a raw
        # sqlite3 connection expects a parameter sequence instead — confirm
        # behaviour per FLASK_ENV.
        check_user = get_db().execute(
            'SELECT id FROM users WHERE username = ?', username
        )
        # Error checking
        if not username:
            error = 'Must provide username'
        elif not password:
            error = 'Must provide password'
        elif check_user:
            error = f'User {username} is already registered, sorry!'
        # Register the user
        if error is None:
            # Only the password hash is persisted, never the plain text.
            get_db().execute(
                'INSERT INTO users (username, password) VALUES (?, ?)',
                username, generate_password_hash(password)
            )
            flash('User correctly registered', category='message')
            return redirect(url_for('auth.login'))
        else:
            flash(error, category='error')
            return render_template('auth/register.html'), 400
    return render_template('auth/register.html')
@bp.route('/login', methods=('GET', 'POST'))
def login():
    """Log-in view: check credentials and store the user id in the session."""
    if request.method == 'POST':
        username = request.form['username']
        password = request.form['password']
        error = None
        user = get_db().execute(
            'SELECT * FROM users WHERE username = ?', username
        )
        if not user:
            error = 'Incorrect username'
        elif not check_password_hash(user[0].get('password'), password):
            error = 'Incorrect password'
        if error is None:
            # First clear the session
            session.clear()
            # Remember user logged in
            session['user_id'] = user[0].get('id')
            return redirect(url_for('index'))
        else:
            flash(error, category='error')
            return render_template('auth/login.html'), 400
    return render_template('auth/login.html')
@bp.before_app_request
def load_logged_in_user():
    """Populate g.user, g.macros and g.log from the session before each request."""
    user_id = session.get('user_id')
    if user_id is None:
        g.user = None
    else:
        # User id to flask global variable
        g.user = get_db().execute(
            'SELECT * FROM users WHERE id = ?', user_id
        )[0]
        # Macros to flask global variable
        macros = get_db().execute(
            'SELECT * FROM macros WHERE user_id = ?', user_id,
        )
        if macros:
            g.macros = macros[0]
        else:
            g.macros = None
        # Log to flask global variable
        # Today's aggregated intake; SUM columns are NULL when no rows match.
        g.log = get_db().execute(
            ''' SELECT SUM(food_kcal) as kcal,
            SUM(food_carbs) as carbs,
            SUM(food_protein) as protein,
            SUM(food_fat) as fat
            FROM food_logs WHERE user_id = ? AND date = ?''',
            g.user['id'], date.today()
        )[0]
@bp.route('/logout')
def logout():
    """Drop all session state and send the visitor back to the front page."""
    session.clear()
    target = url_for('index')
    return redirect(target)
def login_required(view):
    """Decorator: bounce anonymous visitors to the login page, else run *view*."""
    @functools.wraps(view)
    def wrapped_view(**kwargs):
        if g.user is not None:
            return view(**kwargs)
        return redirect(url_for('auth.login'))
    return wrapped_view
| {"/Nutrilator/calculator.py": ["/Nutrilator/auth.py", "/Nutrilator/db.py"], "/Nutrilator/foodtracker.py": ["/Nutrilator/auth.py", "/Nutrilator/db.py"], "/Nutrilator/__init__.py": ["/Nutrilator/db.py", "/Nutrilator/auth.py"], "/Nutrilator/auth.py": ["/Nutrilator/db.py"]} |
45,847 | lucasjt93/Nutrilator | refs/heads/main | /Nutrilator/db.py | import sqlite3
import os
import cs50
import click
from datetime import datetime, timedelta
from flask import current_app, g
from flask.cli import with_appcontext
# Get flask env from env variable
env = os.getenv("FLASK_ENV")
db_name = os.getenv("DATABASE_URL")
def get_db():
    """Return the request-scoped database handle, creating it on first use.

    Production (FLASK_ENV == "production") connects to Heroku Postgres via
    cs50.SQL; any other environment opens the local sqlite3 file from config.
    """
    if 'db' not in g:
        # Connect to heroku psql
        if env == "production":
            # Heroku solution for postgres change of domain
            uri = os.getenv("DATABASE_URL")
            if uri.startswith("postgres://"):
                uri = uri.replace("postgres://", "postgresql://", 1)
            # rest of connection code using the connection string `uri`
            g.db = cs50.SQL(uri)
        else:
            # Connect to local sqlite3
            g.db = sqlite3.connect(
                current_app.config['DATABASE'],
                detect_types=sqlite3.PARSE_DECLTYPES
            )
            # Rows behave like mappings (column access by name).
            g.db.row_factory = sqlite3.Row
    return g.db
def close_db(e=None):
    """Teardown hook: release the request's database handle, if any.

    :param e: exception passed in by Flask's teardown machinery; unused.
    """
    # Get db from g
    db = g.pop('db', None)
    # Env determination for closing
    if db is not None:
        if env == "development":
            db.close()
        elif env == "production":
            # Workaround for fixing connection limit in postgres with Heroku free account
            # Kill this app's idle Postgres backends older than 10 seconds.
            utc_time = datetime.utcnow() - timedelta(0, 10)
            db.execute(
                'SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname = ? AND state = ? AND state_change < ?',
                db_name[-14:],  # NOTE(review): assumes the db name is the URL's last 14 chars — fragile, confirm
                'idle',
                utc_time
            )
def init_db():
    """(Re)create the schema by executing schema.sql against the current db."""
    with current_app.open_resource('schema.sql') as schema_file:
        sql_script = schema_file.read().decode('utf8')
    get_db().executescript(sql_script)
@click.command('init-db')
@with_appcontext
def init_db_command():
    """CLI command `flask init-db`: (re)create the database schema."""
    # Clear existing data and create new tables
    init_db()
    click.echo('Db initialized!')
def init_app(app):
    """Register database teardown and the init-db CLI command on the app."""
    app.teardown_appcontext(close_db)
    app.cli.add_command(init_db_command)
| {"/Nutrilator/calculator.py": ["/Nutrilator/auth.py", "/Nutrilator/db.py"], "/Nutrilator/foodtracker.py": ["/Nutrilator/auth.py", "/Nutrilator/db.py"], "/Nutrilator/__init__.py": ["/Nutrilator/db.py", "/Nutrilator/auth.py"], "/Nutrilator/auth.py": ["/Nutrilator/db.py"]} |
45,848 | mrsuger/blog_django | refs/heads/master | /my_blog/blog/models.py | from django.db import models
# Create your models here.
class Category(models.Model):
    """Blog post category (original docstring: 分类)."""
    name = models.CharField('名称', max_length = 16)
    def __str__(self):
        return self.name
class Tag(models.Model):
    """Blog post tag (original docstring: 标签)."""
    name = models.CharField('名称', max_length = 16)
    def __str__(self):
        return self.name
class Blog(models.Model):
    """A blog post (original docstring: 博客)."""
    title = models.CharField('标题', max_length = 32)
    author = models.CharField('作者', max_length = 16)
    content = models.TextField('正文')
    created = models.DateTimeField('发布时间', auto_now_add = True)
    # NOTE(review): ForeignKey without on_delete implies Django < 2.0 — confirm project version.
    category = models.ForeignKey(Category, verbose_name = '分类')
    tags = models.ManyToManyField(Tag, verbose_name = '标签')
    def __str__(self):
        return self.title
class Comment(models.Model):
    """A reader comment attached to one blog post (original docstring: 评论)."""
    blog = models.ForeignKey(Blog, verbose_name = '博客')
    name = models.CharField('称呼', max_length = 16)
    email = models.EmailField('邮箱')
    content = models.CharField('内容', max_length = 140)
    created = models.DateTimeField('发布时间', auto_now_add = True)
| {"/my_blog/blog/views.py": ["/my_blog/blog/models.py"], "/my_blog/blog/templatetags/blog_tags.py": ["/my_blog/blog/models.py"]} |
45,849 | mrsuger/blog_django | refs/heads/master | /my_blog/blog/views.py | import markdown
from django.shortcuts import render, get_object_or_404
from .models import Blog, Category, Tag
from .forms import CommentForm
from django.http import Http404, HttpResponse
# 导入分页包
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
# Create your views here.
def detail(request, blog_id):
    """Render a single blog post with its content converted from Markdown.

    :param blog_id: numeric primary key captured from the URL.
    :raises Http404: when no post with that id exists.
    """
    # Idiom: get_object_or_404 (already imported above) replaces the manual
    # try/except-DoesNotExist/raise-Http404 dance with the same behaviour.
    post = get_object_or_404(Blog, id=blog_id)
    post.content = markdown.markdown(post.content,
                                     extensions=[
                                         'markdown.extensions.extra',
                                         'markdown.extensions.codehilite',
                                         'markdown.extensions.toc',
                                     ])
    return render(request, 'detail.html', {'post': post})
def home(request):
    """Front page: paginated list of blog posts, newest first (3 per page)."""
    blogs = Blog.objects.all().order_by('-created')
    paginator = Paginator(blogs, 3)
    page = request.GET.get('page')
    try:
        blog_post = paginator.page(page)
    except PageNotAnInteger:
        # Non-numeric ?page= parameter: fall back to the first page.
        blog_post = paginator.page(1)
    except EmptyPage:
        # Out-of-range page: deliver the last page.
        # BUG FIX: was `paginator.paginator(paginator.num_pages)`, which raised
        # AttributeError (Paginator has no `paginator` attribute).
        blog_post = paginator.page(paginator.num_pages)
    return render(request, 'home.html', {'blog_post': blog_post})
def about_me(request):
    """Static 'about me' page."""
    template_name = 'aboutme.html'
    return render(request, template_name)
# archives页面获得year和month参数,从而显示相应的页面
def archives(request, year, month):
    """List posts published in the given year/month, oldest first."""
    matching_posts = Blog.objects.filter(
        created__year=year,
        created__month=month,
    ).order_by('created')
    return render(request, 'archives.html', {'blog_date_list': matching_posts})
def category(request, pk):
    """List posts belonging to one category, oldest first; 404 on unknown pk."""
    selected = get_object_or_404(Category, pk=pk)
    category_posts = Blog.objects.filter(category=selected).order_by('created')
    return render(request, 'category.html', {'posts': category_posts})
def tags(request, pk):
    """List posts carrying one tag, oldest first; 404 on unknown pk."""
    selected = get_object_or_404(Tag, pk=pk)
    tagged_posts = Blog.objects.filter(tags=selected).order_by('created')
    return render(request, 'tag.html', {'posts': tagged_posts})
def search(request):
    """Title search via ?s=<query>.

    Blank query renders the home template; otherwise renders search results
    with an `error` flag when nothing matched. Without an `s` parameter the
    visitor is redirected to the front page.
    """
    # `s` is the query parameter submitted by the search form.
    if 's' in request.GET:
        s = request.GET['s']
        if not s:
            return render(request, 'home.html')
        post_list = Blog.objects.filter(title__icontains=s)
        # `error` tells the template whether the result set is empty.
        return render(request, 'search.html', {
            'post_list': post_list,
            'error': len(post_list) == 0,
        })
    # BUG FIX: `redirect` was never imported at module level, so this line
    # raised NameError; import it locally to keep the fix self-contained.
    from django.shortcuts import redirect
    return redirect('/')
45,850 | mrsuger/blog_django | refs/heads/master | /my_blog/blog/templatetags/blog_tags.py | from django.db.models.aggregates import Count
from django import template
from ..models import Blog, Category, Tag
register = template.Library()
# 归档模板标签
@register.simple_tag
def archives():
    """Month buckets of post creation dates, newest first, for the sidebar."""
    month_buckets = Blog.objects.dates('created', 'month', order='DESC')
    return month_buckets
# 分类模板标签
@register.simple_tag
def get_categories():
return Category.objects.all()
# 标签云模板标签
@register.simple_tag
def get_tags():
return Tag.objects.annotate(num_posts=Count('blog')).filter(num_posts__gt=0) | {"/my_blog/blog/views.py": ["/my_blog/blog/models.py"], "/my_blog/blog/templatetags/blog_tags.py": ["/my_blog/blog/models.py"]} |
45,851 | mrsuger/blog_django | refs/heads/master | /my_blog/blog/urls.py | from django.conf.urls import url
from . import views
urlpatterns = [
    # Post detail by numeric primary key.
    url(r'^detail/(?P<blog_id>\d+)/$', views.detail, name = 'get_detail'),
    url(r'^home/$', views.home, name = 'get_home'),
    url(r'^aboutme/$', views.about_me, name = 'about_me'),
    # The archives page takes two url parameters: year and month.
    url(r'^archives/(?P<year>\d+)/(?P<month>\d+)/$', views.archives, name = 'get_archives'),
    # Category and tag pages are selected by numeric primary key.
    url(r'^category/(?P<pk>[0-9]+)/$', views.category, name = 'get_category'),
    url(r'^tag/(?P<pk>[0-9]+)/$', views.tags, name='get_tag'),
    url(r'^search/$', views.search, name = 'get_search'),
]
45,868 | ta-verma/Bhagvadgita-API | refs/heads/master | /pygita.py | from lxml import html
import requests
from bs4 import BeautifulSoup
####################################################################
# API
####################################################################
class Bhagvadgita:
    """Scraper for bhagavadgita.io verse pages (Sanskrit, Hindi, English)."""
    @staticmethod
    def get_by_chapter_and_verse(chapter_number, veser_number):
        """Fetch one verse and return a dict with keys
        'sloak', 'hindi', 'english' and 'wrd_mean' (Hindi word meanings).

        NOTE(review): relies on brittle positional HTML splitting; any site
        markup change breaks the parsing — verify against live pages.
        """
        url = "https://bhagavadgita.io/chapter/{}/verse/{}/hi/".format(str(chapter_number), str(veser_number))
        soup = BeautifulSoup(requests.get(url).content, 'html.parser')
        sanskrit = (str(soup.find('p')).split("<b>")[1]).split("</b>")[0]
        hindi_meaning = (str(soup.findAll('i')).split(",")[4]).split('<i>')[1].split('</i>')[0]
        hindi = (str(soup.findAll("p")).split(";")[3]).split(">")[1].split("</")[0]
        # The English translation lives on the non-/hi/ page; fetch it separately.
        url = "https://bhagavadgita.io/chapter/{}/verse/{}/".format(str(chapter_number), str(veser_number))
        soup = BeautifulSoup(requests.get(url).content, 'html.parser')
        english = str(soup.findAll('p')).split(">")[13].split("</")[0]
        # FIX: renamed the result variable from `dict`, which shadowed the builtin.
        result = {
            'sloak' : sanskrit,
            'hindi' : hindi,
            'english' : english,
            'wrd_mean' : hindi_meaning
        }
        return result
| {"/server.py": ["/pygita.py"]} |
45,869 | ta-verma/Bhagvadgita-API | refs/heads/master | /server.py | from flask import Flask, jsonify
from pygita import Bhagvadgita
from flask_cors import CORS
app = Flask (__name__)
CORS(app)
############################################
# Index
############################################
@app.route ('/', methods=['GET'])
def index_route () :
    """API index: static metadata about the project and its author."""
    meta = {
        'author' : 'Tarun Verma',
        'author_url' : 'http://github.com/ta-verma',
        'base_url' : 'gita-api.herokuapp.com',
        'project_name' : 'Bhagvadgita API',
        'project_url' : 'http://ta-verma.github.io/Bhagvadgita-API'
    }
    return jsonify(meta)
############################################
# Bhagvadgita
###########################################
#Gita
@app.route ('/gita/chapter/<ch_num>/verse/<ver_num>', methods=['GET'])
def get_sloak (ch_num, ver_num) :
    """JSON endpoint for a single Gita verse, proxied through the scraper."""
    # FIX: get_by_chapter_and_verse already returns a dict; the redundant
    # dict() copy has been dropped.
    result = Bhagvadgita.get_by_chapter_and_verse(ch_num, ver_num)
    return jsonify (sanskrit=result['sloak'],
                    english=result['english'],
                    hindi=result['hindi'],
                    hindi_meaning=result['wrd_mean'])
###########################################
#Start Flask
###########################################
# Run the development server only when executed directly (Heroku's Procfile
# imports `nutrilator`-style WSGI entry points instead).
if __name__ == "__main__":
    app.run()
| {"/server.py": ["/pygita.py"]} |
45,887 | datalogisk-fagraad/dikultur | refs/heads/master | /apps/events/utils.py | import icalendar
from . import models
def generate_ical(**filters):
    """Build an iCalendar feed from public events matching ``filters``."""
    feed = icalendar.Calendar()
    candidates = models.Event.objects.filter(**filters)
    publishable = (e for e in candidates if e.ical and e.public)
    for event in publishable:
        feed.add_component(icalendar.Event.from_ical(event.ical))
    return feed
| {"/apps/resources/models.py": ["/apps/core/models/__init__.py"], "/apps/events/models.py": ["/apps/core/models/__init__.py", "/apps/groups/models.py"], "/apps/resources/admin.py": ["/apps/resources/models.py"], "/apps/core/views/__init__.py": ["/apps/events/models.py"], "/apps/events/views.py": ["/apps/events/utils.py"], "/apps/groups/models.py": ["/apps/core/models/__init__.py"], "/apps/blogs/models.py": ["/apps/blogs/managers.py"], "/dikultur/urls.py": ["/apps/core/views/__init__.py"], "/apps/groups/views.py": ["/apps/core/models/__init__.py", "/apps/events/models.py", "/apps/events/utils.py"]} |
45,888 | datalogisk-fagraad/dikultur | refs/heads/master | /apps/groups/migrations/0001_initial.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django_extensions.db.fields
from django.conf import settings
class Migration(migrations.Migration):
    """Initial groups schema: Group model plus GroupMembership through-table."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Group',
            fields=[
                ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=255)),
                ('slug', django_extensions.db.fields.AutoSlugField(editable=False, blank=True, populate_from='name')),
                ('description', models.TextField()),
                ('website', models.URLField(blank=True, null=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='GroupMembership',
            fields=[
                ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
                ('group', models.ForeignKey(related_name='memberships', to='groups.Group')),
                ('user', models.ForeignKey(related_name='memberships', to=settings.AUTH_USER_MODEL)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='group',
            name='members',
            field=models.ManyToManyField(through='groups.GroupMembership', to=settings.AUTH_USER_MODEL),
            preserve_default=True,
        ),
    ]
| {"/apps/resources/models.py": ["/apps/core/models/__init__.py"], "/apps/events/models.py": ["/apps/core/models/__init__.py", "/apps/groups/models.py"], "/apps/resources/admin.py": ["/apps/resources/models.py"], "/apps/core/views/__init__.py": ["/apps/events/models.py"], "/apps/events/views.py": ["/apps/events/utils.py"], "/apps/groups/models.py": ["/apps/core/models/__init__.py"], "/apps/blogs/models.py": ["/apps/blogs/managers.py"], "/dikultur/urls.py": ["/apps/core/views/__init__.py"], "/apps/groups/views.py": ["/apps/core/models/__init__.py", "/apps/events/models.py", "/apps/events/utils.py"]} |
45,889 | datalogisk-fagraad/dikultur | refs/heads/master | /apps/resources/migrations/0003_auto_20140823_0015.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add a required ``title`` field to ResourceFile and ResourceLink."""
    dependencies = [
        ('resources', '0002_resourcelink'),
    ]
    operations = [
        migrations.AddField(
            model_name='resourcefile',
            name='title',
            field=models.CharField(max_length=255, default=''),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='resourcelink',
            name='title',
            field=models.CharField(max_length=255, default=''),
            preserve_default=False,
        ),
    ]
| {"/apps/resources/models.py": ["/apps/core/models/__init__.py"], "/apps/events/models.py": ["/apps/core/models/__init__.py", "/apps/groups/models.py"], "/apps/resources/admin.py": ["/apps/resources/models.py"], "/apps/core/views/__init__.py": ["/apps/events/models.py"], "/apps/events/views.py": ["/apps/events/utils.py"], "/apps/groups/models.py": ["/apps/core/models/__init__.py"], "/apps/blogs/models.py": ["/apps/blogs/managers.py"], "/dikultur/urls.py": ["/apps/core/views/__init__.py"], "/apps/groups/views.py": ["/apps/core/models/__init__.py", "/apps/events/models.py", "/apps/events/utils.py"]} |
45,890 | datalogisk-fagraad/dikultur | refs/heads/master | /apps/resources/urls.py | from django.conf.urls import patterns, url
from . import views
urlpatterns = patterns(
    '',
    url(r'^create/$', views.ResourceCreate.as_view(), name='create'),
    # AJAX endpoints for toggling a user's upvote.
    url(r'^detail/(?P<slug>\S+)/remove_upvote$', views.resource_remove_upvote, name='remove_upvote'),
    url(r'^detail/(?P<slug>\S+)/upvote$', views.resource_upvote, name='upvote'),
    url(r'^detail/(?P<slug>\S+)/update/delete_file$', views.resource_file_delete, name='delete-file'),
    url(r'^detail/(?P<slug>\S+)/update$', views.ResourceUpdate.as_view(), name='update'),
    url(r'^detail/(?P<slug>\S+)$', views.ResourceDetail.as_view(), name='detail'),
    url(r'^$', views.ResourceList.as_view(), name='list'),
)
| {"/apps/resources/models.py": ["/apps/core/models/__init__.py"], "/apps/events/models.py": ["/apps/core/models/__init__.py", "/apps/groups/models.py"], "/apps/resources/admin.py": ["/apps/resources/models.py"], "/apps/core/views/__init__.py": ["/apps/events/models.py"], "/apps/events/views.py": ["/apps/events/utils.py"], "/apps/groups/models.py": ["/apps/core/models/__init__.py"], "/apps/blogs/models.py": ["/apps/blogs/managers.py"], "/dikultur/urls.py": ["/apps/core/views/__init__.py"], "/apps/groups/views.py": ["/apps/core/models/__init__.py", "/apps/events/models.py", "/apps/events/utils.py"]} |
45,891 | datalogisk-fagraad/dikultur | refs/heads/master | /apps/resources/templatetags/resources_tags.py | from django import template
register = template.Library()
@register.inclusion_tag('resources/includes/resource_list.html')
def resource_list(resources, profile_view=False):
    """Render the shared resource-list include for the given queryset."""
    include_context = {
        'resources': resources,
        'profile_view': profile_view,
    }
    return include_context
45,892 | datalogisk-fagraad/dikultur | refs/heads/master | /apps/blogs/migrations/0002_auto_20150212_1904.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import taggit.managers
class Migration(migrations.Migration):
    """Add soft-delete flag and tags to Post; give Post.blog a related_name."""
    dependencies = [
        ('taggit', '0001_initial'),
        ('blogs', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='post',
            name='is_deleted',
            field=models.BooleanField(default=False),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='post',
            name='tags',
            field=taggit.managers.TaggableManager(help_text='A comma-separated list of tags.', verbose_name='Tags', to='taggit.Tag', blank=True, through='taggit.TaggedItem'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='post',
            name='blog',
            field=models.ForeignKey(to='blogs.Blog', related_name='posts'),
        ),
    ]
| {"/apps/resources/models.py": ["/apps/core/models/__init__.py"], "/apps/events/models.py": ["/apps/core/models/__init__.py", "/apps/groups/models.py"], "/apps/resources/admin.py": ["/apps/resources/models.py"], "/apps/core/views/__init__.py": ["/apps/events/models.py"], "/apps/events/views.py": ["/apps/events/utils.py"], "/apps/groups/models.py": ["/apps/core/models/__init__.py"], "/apps/blogs/models.py": ["/apps/blogs/managers.py"], "/dikultur/urls.py": ["/apps/core/views/__init__.py"], "/apps/groups/views.py": ["/apps/core/models/__init__.py", "/apps/events/models.py", "/apps/events/utils.py"]} |
45,893 | datalogisk-fagraad/dikultur | refs/heads/master | /apps/resources/models.py | from django.db import models
from django.conf import settings
from django_extensions.db.fields import AutoSlugField
from taggit.managers import TaggableManager
from apps.core.models import TimestampedModel
class Resource(TimestampedModel):
    """A user-shared resource (files and links) that other users can upvote."""
    title = models.CharField(max_length=255)
    slug = AutoSlugField(populate_from='title')
    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    description = models.TextField()
    tags = TaggableManager(blank=True)
    class Meta:
        verbose_name = 'resource'
        verbose_name_plural = 'resources'
    def get_absolute_url(self):
        """URL of this resource's detail page."""
        from django.core.urlresolvers import reverse
        return reverse('resources:detail', kwargs={'slug': self.slug})
    def __str__(self):
        return self.title
    def upvote(self, user):
        """
        Raises IntegrityError if user has already upvoted resource
        :param user:
        """
        # NOTE(review): create() raises IntegrityError on duplicates, never
        # DoesNotExist, so this except clause looks dead (callers catch the
        # IntegrityError themselves) — confirm the intent.
        try:
            self.resourceupvote_set.create(user=user, resource=self)
        except ResourceUpvote.DoesNotExist:
            pass
    def remove_upvote(self, user):
        """Silently delete this user's upvote of the resource, if one exists."""
        try:
            upvote = self.resourceupvote_set.get(user=user, resource=self)
            upvote.delete()
        except ResourceUpvote.DoesNotExist:
            pass
    @property
    def upvotes(self):
        """Total number of upvotes for this resource."""
        return self.resourceupvote_set.count()
class ResourceUpvote(TimestampedModel):
    """One user's upvote of one resource; each (user, resource) pair is unique."""
    user = models.ForeignKey(settings.AUTH_USER_MODEL)
    resource = models.ForeignKey('Resource')
    class Meta:
        unique_together = ('user', 'resource')
class ResourceFile(TimestampedModel):
    """An uploaded file attached to a resource."""
    resource = models.ForeignKey('Resource', related_name='files')
    file = models.FileField(upload_to='resources')
    class Meta:
        verbose_name = 'resource file'
        verbose_name_plural = 'resource files'
    def filename(self):
        """Basename of the stored file (component after the last '/')."""
        return self.file.name.split('/')[-1]
class ResourceLink(TimestampedModel):
    """An external URL attached to a resource."""
    resource = models.ForeignKey('Resource', related_name='links')
    title = models.CharField(max_length=255)
    url = models.URLField()
    class Meta:
        verbose_name = 'resource link'
        verbose_name_plural = 'resource links'
| {"/apps/resources/models.py": ["/apps/core/models/__init__.py"], "/apps/events/models.py": ["/apps/core/models/__init__.py", "/apps/groups/models.py"], "/apps/resources/admin.py": ["/apps/resources/models.py"], "/apps/core/views/__init__.py": ["/apps/events/models.py"], "/apps/events/views.py": ["/apps/events/utils.py"], "/apps/groups/models.py": ["/apps/core/models/__init__.py"], "/apps/blogs/models.py": ["/apps/blogs/managers.py"], "/dikultur/urls.py": ["/apps/core/views/__init__.py"], "/apps/groups/views.py": ["/apps/core/models/__init__.py", "/apps/events/models.py", "/apps/events/utils.py"]} |
45,894 | datalogisk-fagraad/dikultur | refs/heads/master | /apps/events/models.py | from django.db import models
from django.conf import settings
from django.contrib.auth import models as auth_models
from django_extensions.db.fields import AutoSlugField
from apps.core.models import TimestampedModel
from apps.groups.models import Group
import icalendar
from taggit.managers import TaggableManager
class Event(TimestampedModel):
    """A calendar event, optionally tied to a group, mirrored as iCal text."""
    title = models.CharField(max_length=255)
    slug = AutoSlugField(populate_from='title')
    place = models.CharField(max_length=255)
    start = models.DateTimeField()
    end = models.DateTimeField(null=True, blank=True)
    description = models.TextField()
    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    group = models.ForeignKey(Group, null=True, blank=True)
    tags = TaggableManager(blank=True)
    public = models.BooleanField(default=False)
    ical = models.TextField(null=True, blank=True)
    class Meta:
        verbose_name = 'event'
        verbose_name_plural = 'events'
    def get_absolute_url(self):
        """URL of this event's detail page."""
        from django.core.urlresolvers import reverse
        return reverse('events:detail', kwargs={'slug': self.slug})
    def __str__(self):
        return self.title
    def save(self, *args, **kwargs):
        """Persist the event, regenerating its iCal representation.

        Saves once up front when the slug is still empty so that
        get_absolute_url() (used as the iCal description) has a slug, then
        saves again with the refreshed ``ical`` text.
        """
        # We need the slug to create description for the iCal generation
        if not self.slug:
            super().save(*args, **kwargs)
        event = icalendar.Event()
        event.add('dtstart', self.start)
        if self.end:
            event.add('dtend', self.end)
        event.add('summary', self.title)
        event.add('description', self.get_absolute_url())
        self.ical = event.to_ical()
        super().save(*args, **kwargs)
| {"/apps/resources/models.py": ["/apps/core/models/__init__.py"], "/apps/events/models.py": ["/apps/core/models/__init__.py", "/apps/groups/models.py"], "/apps/resources/admin.py": ["/apps/resources/models.py"], "/apps/core/views/__init__.py": ["/apps/events/models.py"], "/apps/events/views.py": ["/apps/events/utils.py"], "/apps/groups/models.py": ["/apps/core/models/__init__.py"], "/apps/blogs/models.py": ["/apps/blogs/managers.py"], "/dikultur/urls.py": ["/apps/core/views/__init__.py"], "/apps/groups/views.py": ["/apps/core/models/__init__.py", "/apps/events/models.py", "/apps/events/utils.py"]} |
45,895 | datalogisk-fagraad/dikultur | refs/heads/master | /apps/resources/admin.py | from django.contrib import admin
from .models import Resource, ResourceFile, ResourceLink, ResourceUpvote
# Admin wiring: resources edit their files and links inline on the same page.
class ResourceFileInline(admin.TabularInline):
    """Inline editor for a resource's uploaded files."""
    model = ResourceFile
class ResourceLinkInline(admin.TabularInline):
    """Inline editor for a resource's external links."""
    model = ResourceLink
class ResourceAdmin(admin.ModelAdmin):
    """Resource admin with file and link inlines."""
    model = Resource
    inlines = [ResourceFileInline, ResourceLinkInline]
admin.site.register(Resource, ResourceAdmin)
admin.site.register(ResourceFile)
admin.site.register(ResourceLink)
admin.site.register(ResourceUpvote)
45,896 | datalogisk-fagraad/dikultur | refs/heads/master | /apps/blogs/managers.py | from django.db import models
class PostQuerySet(models.QuerySet):
    """Queryset helpers distinguishing deleted, live, and public posts."""
    def deleted(self):
        """Posts that have been soft-deleted."""
        return self.filter(is_deleted=True)
    def live(self):
        """Posts that are not soft-deleted (published or not)."""
        return self.filter(is_deleted=False)
    def public(self):
        """Posts that are published and not soft-deleted."""
        return self.filter(is_deleted=False, public=True)
| {"/apps/resources/models.py": ["/apps/core/models/__init__.py"], "/apps/events/models.py": ["/apps/core/models/__init__.py", "/apps/groups/models.py"], "/apps/resources/admin.py": ["/apps/resources/models.py"], "/apps/core/views/__init__.py": ["/apps/events/models.py"], "/apps/events/views.py": ["/apps/events/utils.py"], "/apps/groups/models.py": ["/apps/core/models/__init__.py"], "/apps/blogs/models.py": ["/apps/blogs/managers.py"], "/dikultur/urls.py": ["/apps/core/views/__init__.py"], "/apps/groups/views.py": ["/apps/core/models/__init__.py", "/apps/events/models.py", "/apps/events/utils.py"]} |
45,897 | datalogisk-fagraad/dikultur | refs/heads/master | /apps/resources/migrations/0004_auto_20140824_1039.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Give ResourceFile/ResourceLink FKs explicit related_names (files/links)."""
    dependencies = [
        ('resources', '0003_auto_20140823_0015'),
    ]
    operations = [
        migrations.AlterField(
            model_name='resourcefile',
            name='resource',
            field=models.ForeignKey(to='resources.Resource', related_name='files'),
        ),
        migrations.AlterField(
            model_name='resourcelink',
            name='resource',
            field=models.ForeignKey(to='resources.Resource', related_name='links'),
        ),
    ]
| {"/apps/resources/models.py": ["/apps/core/models/__init__.py"], "/apps/events/models.py": ["/apps/core/models/__init__.py", "/apps/groups/models.py"], "/apps/resources/admin.py": ["/apps/resources/models.py"], "/apps/core/views/__init__.py": ["/apps/events/models.py"], "/apps/events/views.py": ["/apps/events/utils.py"], "/apps/groups/models.py": ["/apps/core/models/__init__.py"], "/apps/blogs/models.py": ["/apps/blogs/managers.py"], "/dikultur/urls.py": ["/apps/core/views/__init__.py"], "/apps/groups/views.py": ["/apps/core/models/__init__.py", "/apps/events/models.py", "/apps/events/utils.py"]} |
45,898 | datalogisk-fagraad/dikultur | refs/heads/master | /apps/core/views/__init__.py | from django.utils import timezone
from django.views.generic import TemplateView
from allauth.account.views import LogoutView, LoginView
from ...events.models import Event
class FrontPage(TemplateView):
    """Front page showing the next five public upcoming events."""
    template_name = 'core/frontpage.html'
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Upcoming public events only, soonest first, capped at five.
        context['events'] = Event.objects.filter(
            public=True, start__gt=timezone.now()).order_by('start')[:5]
        return context
class ProfileView(TemplateView):
    """Static profile page for the current user."""
    template_name = 'core/profile.html'
class Logout(LogoutView):
    """allauth logout view with the project's own template."""
    template_name = 'core/logout.html'
class Login(LoginView):
    """allauth login view with the project's own template."""
    template_name = 'core/login.html'
| {"/apps/resources/models.py": ["/apps/core/models/__init__.py"], "/apps/events/models.py": ["/apps/core/models/__init__.py", "/apps/groups/models.py"], "/apps/resources/admin.py": ["/apps/resources/models.py"], "/apps/core/views/__init__.py": ["/apps/events/models.py"], "/apps/events/views.py": ["/apps/events/utils.py"], "/apps/groups/models.py": ["/apps/core/models/__init__.py"], "/apps/blogs/models.py": ["/apps/blogs/managers.py"], "/dikultur/urls.py": ["/apps/core/views/__init__.py"], "/apps/groups/views.py": ["/apps/core/models/__init__.py", "/apps/events/models.py", "/apps/events/utils.py"]} |
45,899 | datalogisk-fagraad/dikultur | refs/heads/master | /apps/events/urls.py | from django.conf.urls import patterns, url
from . import views
urlpatterns = patterns('',
    # NOTE(review): two patterns share the url name 'calendar'; reverse()
    # resolves to the last one registered — confirm the duplication is intended.
    url(r'^calendar\.ics$', views.CalendarFeed.as_view(),
        name='calendar'),
    url(r'^calend[ae]r(|\.ics)$', views.CalendarFeed.as_view(),
        name='calendar'),
    url(r'^create$', views.EventCreate.as_view(),
        name='create'),
    url(r'^detail/(?P<slug>\S+)/update$', views.EventUpdate.as_view(),
        name='update'),
    url(r'^detail/(?P<slug>\S+)$', views.EventDetail.as_view(),
        name='detail'),
    url(r'^$', views.EventList.as_view(), name='list'),
)
45,900 | datalogisk-fagraad/dikultur | refs/heads/master | /apps/resources/views.py | from django.db import IntegrityError
from django.http import HttpResponse, HttpResponseRedirect
from django.views.generic import ListView, DetailView, CreateView, \
UpdateView
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.decorators import login_required
from ..core.views.mixins import LoginRequiredMixin
from . import models, forms
class ResourceList(ListView):
    """List page showing every resource."""
    template_name = 'resources/resource_list.html'
    queryset = models.Resource.objects.all()
    context_object_name = 'resources'
class ResourceDetail(DetailView):
    """Detail page for a Resource; tells the template whether the current
    user has already upvoted it (``has_liked``)."""
    template_name = 'resources/resource_detail.html'
    model = models.Resource
    context_object_name = 'resource'
    @method_decorator(csrf_exempt)
    def dispatch(self, *args, **kwargs):
        return super().dispatch(*args, **kwargs)
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        user = self.request.user
        # Anonymous users can't like stuff
        if not user.is_anonymous():
            # FIX: .filter(...).exists() never raises DoesNotExist, so the old
            # try/except wrapper was dead code; the bare query suffices and
            # already yields False when no upvote row exists.
            context['has_liked'] = models.ResourceUpvote.objects.filter(
                user=user,
                resource=self.object
            ).exists()
        return context
@login_required
@csrf_exempt
def resource_upvote(request, **kwargs):
    """Register an upvote by request.user on the resource named by 'slug'.

    Responds with the new upvote count (HTTP 200) on success, or HTTP 500
    when the vote already exists (unique-constraint IntegrityError).
    """
    resource = models.Resource.objects.get(slug=kwargs.get('slug', None))
    try:
        resource.upvote(request.user)
    except IntegrityError:
        # Duplicate vote — should not happen, but everything is possible...
        return HttpResponse(status=500)
    return HttpResponse(content=resource.upvotes, status=200)
@login_required
@csrf_exempt
def resource_remove_upvote(request, **kwargs):
    """Withdraw request.user's upvote from the resource named by 'slug'.

    Responds with the remaining upvote count (HTTP 200).
    """
    target = models.Resource.objects.get(slug=kwargs.get('slug', None))
    target.remove_upvote(request.user)
    return HttpResponse(content=target.upvotes, status=200)
class ResourceCreate(LoginRequiredMixin, CreateView):
    """Create a Resource owned by the current user, attaching any uploads."""
    template_name = 'resources/resource_form.html'
    model = models.Resource
    form_class = forms.ResourceForm

    def form_valid(self, form):
        # Stamp ownership before saving, then attach each uploaded file.
        form.instance.owner = self.request.user
        resource = form.save()
        uploads = self.request.FILES.getlist('resourcefile')
        for upload in uploads:
            models.ResourceFile.objects.create(resource=resource, file=upload)
        return HttpResponseRedirect(resource.get_absolute_url())
class ResourceUpdate(LoginRequiredMixin, UpdateView):
    """Edit an existing Resource; newly uploaded files are appended."""
    template_name = 'resources/resource_form.html'
    model = models.Resource
    form_class = forms.ResourceForm

    def form_valid(self, form):
        resource = form.save()
        # Any files posted alongside the edit become additional attachments.
        uploads = self.request.FILES.getlist('resourcefile')
        for upload in uploads:
            models.ResourceFile.objects.create(resource=resource, file=upload)
        return HttpResponseRedirect(resource.get_absolute_url())
@login_required
@csrf_exempt
def resource_file_delete(request, **kwargs):
    """Delete the ResourceFile whose pk arrives in the POST body.

    HTTP 200 on success; HTTP 500 when no pk was supplied (preserved from
    the original, even though 4xx would be more conventional).
    """
    pk = request.POST.get('pk', None)
    if not pk:
        # Something went wrong — request carried no pk.
        return HttpResponse(status=500)
    models.ResourceFile.objects.get(pk=pk).delete()
    return HttpResponse(status=200)
| {"/apps/resources/models.py": ["/apps/core/models/__init__.py"], "/apps/events/models.py": ["/apps/core/models/__init__.py", "/apps/groups/models.py"], "/apps/resources/admin.py": ["/apps/resources/models.py"], "/apps/core/views/__init__.py": ["/apps/events/models.py"], "/apps/events/views.py": ["/apps/events/utils.py"], "/apps/groups/models.py": ["/apps/core/models/__init__.py"], "/apps/blogs/models.py": ["/apps/blogs/managers.py"], "/dikultur/urls.py": ["/apps/core/views/__init__.py"], "/apps/groups/views.py": ["/apps/core/models/__init__.py", "/apps/events/models.py", "/apps/events/utils.py"]} |
45,901 | datalogisk-fagraad/dikultur | refs/heads/master | /apps/events/views.py | from django.http import HttpResponse
from django.views.generic import ListView, DetailView, CreateView, UpdateView, \
View
from django.utils import timezone
from . import models, forms
from apps.core.views.mixins import LoginRequiredMixin
from apps.events.utils import generate_ical
class EventList(ListView):
    """List upcoming public events; past public events go in the context.

    FIX: the original declared a class-level ``queryset`` attribute whose
    filter called ``timezone.now()`` — that evaluates once at import time,
    freezing the "upcoming" cut-off until the process restarts. Building
    the queryset in ``get_queryset()`` evaluates "now" per request.
    """
    template_name = 'events/event_list.html'
    context_object_name = 'events'

    def get_queryset(self):
        # Evaluated per request so the cut-off is always current.
        return models.Event.objects.filter(
            public=True, start__gt=timezone.now()).order_by('start')

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # NOTE: an event starting exactly at "now" lands in neither list
        # (start__gt above vs start__lt here) — preserved from the original.
        context['past_events'] = models.Event.objects.filter(
            public=True, start__lt=timezone.now()).order_by('start')
        return context
class EventDetail(DetailView):
    """Show a single Event, looked up by slug/pk via DetailView defaults."""
    template_name = 'events/event_detail.html'
    context_object_name = 'event'
    model = models.Event
class EventCreate(LoginRequiredMixin, CreateView):
    """Create an Event owned by the logged-in user."""
    template_name = 'events/event_form.html'
    model = models.Event
    form_class = forms.EventForm

    def form_valid(self, form):
        # Stamp ownership before delegating to the default save/redirect.
        form.instance.owner = self.request.user
        return super().form_valid(form)

    def get_form_kwargs(self):
        # EventForm takes the user to build its group choices.
        return dict(super().get_form_kwargs(), user=self.request.user)

    def get_context_data(self, *args, **kwargs):
        ctx = super().get_context_data(*args, **kwargs)
        ctx['action'] = 'new'
        return ctx
class EventUpdate(LoginRequiredMixin, UpdateView):
    """Edit an existing Event."""
    template_name = 'events/event_form.html'
    model = models.Event
    form_class = forms.EventForm

    def get_context_data(self, *args, **kwargs):
        ctx = super().get_context_data(*args, **kwargs)
        ctx['action'] = 'update'
        return ctx

    def get_form_kwargs(self):
        # EventForm takes the user to build its group choices.
        return dict(super().get_form_kwargs(), user=self.request.user)
class CalendarFeed(View):
    """Serve the site-wide iCalendar feed as a raw HTTP response."""

    def get(self, *args):
        return HttpResponse(content=generate_ical().to_ical())
| {"/apps/resources/models.py": ["/apps/core/models/__init__.py"], "/apps/events/models.py": ["/apps/core/models/__init__.py", "/apps/groups/models.py"], "/apps/resources/admin.py": ["/apps/resources/models.py"], "/apps/core/views/__init__.py": ["/apps/events/models.py"], "/apps/events/views.py": ["/apps/events/utils.py"], "/apps/groups/models.py": ["/apps/core/models/__init__.py"], "/apps/blogs/models.py": ["/apps/blogs/managers.py"], "/dikultur/urls.py": ["/apps/core/views/__init__.py"], "/apps/groups/views.py": ["/apps/core/models/__init__.py", "/apps/events/models.py", "/apps/events/utils.py"]} |
45,902 | datalogisk-fagraad/dikultur | refs/heads/master | /apps/groups/urls.py | from django.conf.urls import patterns, url
from . import views
urlpatterns = patterns('',
    # NOTE(review): the next two routes intentionally share the name
    # 'ical'. The strict pattern (only named groups) is the one
    # reverse('groups:ical') can resolve; the lenient one contains an
    # unnamed group (|\.ics) — not reliably reversible — and exists to
    # also match 'calendar'/'calender' with or without '.ics'. Do not
    # "deduplicate" without confirming reverse() still works.
    url(
        r'^detail/(?P<slug>\S+)/calendar\.ics$',
        views.GroupICal.as_view(),
        name='ical'
    ),
    url(
        r'^detail/(?P<slug>\S+)/calend[ae]r(|\.ics)$',
        views.GroupICal.as_view(),
        name='ical'
    ),
    url(
        r'^detail/(?P<slug>\S+)/update$',
        views.GroupUpdate.as_view(),
        name='update'
    ),
    url(
        r'^detail/(?P<slug>\S+)/members$',
        views.GroupMembers.as_view(),
        name='members'
    ),
    url(
        r'^detail/(?P<slug>\S+)/join$',
        views.GroupJoin.as_view(),
        name='join'
    ),
    url(
        r'^detail/(?P<slug>\S+)/make-admin/(?P<user_id>\d+)$',
        views.GroupMakeAdmin.as_view(),
        name='make-admin'
    ),
    url(
        r'^detail/(?P<slug>\S+)/remove-admin/(?P<user_id>\d+)$',
        views.GroupRemoveAdmin.as_view(),
        name='remove-admin'
    ),
    # Generic detail route must come after the more specific ones above,
    # since \S+ would otherwise swallow their suffixes.
    url(
        r'^detail/(?P<slug>\S+)$',
        views.GroupDetail.as_view(),
        name='detail'
    ),
    url(
        r'^create$',
        views.GroupCreate.as_view(),
        name='create'
    ),
    url(
        r'^$',
        views.GroupList.as_view(),
        name='list'
    ),
)
| {"/apps/resources/models.py": ["/apps/core/models/__init__.py"], "/apps/events/models.py": ["/apps/core/models/__init__.py", "/apps/groups/models.py"], "/apps/resources/admin.py": ["/apps/resources/models.py"], "/apps/core/views/__init__.py": ["/apps/events/models.py"], "/apps/events/views.py": ["/apps/events/utils.py"], "/apps/groups/models.py": ["/apps/core/models/__init__.py"], "/apps/blogs/models.py": ["/apps/blogs/managers.py"], "/dikultur/urls.py": ["/apps/core/views/__init__.py"], "/apps/groups/views.py": ["/apps/core/models/__init__.py", "/apps/events/models.py", "/apps/events/utils.py"]} |
45,903 | datalogisk-fagraad/dikultur | refs/heads/master | /apps/groups/models.py | from django.db import models
from django.conf import settings
from django_extensions.db.fields import AutoSlugField
from ..core.models import TimestampedModel
class Group(TimestampedModel):
    """A user group; membership goes through GroupMembership (with admin flag)."""
    name = models.CharField(max_length=255)
    # URL identifier derived automatically from name.
    slug = AutoSlugField(populate_from='name')
    description = models.TextField()
    website = models.URLField(null=True, blank=True)
    email = models.EmailField(null=True, blank=True)
    mailinglist_signup = models.URLField(null=True, blank=True)
    # Whether anyone may join without approval — presumably; confirm against
    # the join view's logic.
    is_open = models.BooleanField(default=True)
    members = models.ManyToManyField(
        settings.AUTH_USER_MODEL, through='GroupMembership'
    )

    class Meta:
        verbose_name = 'group'
        verbose_name_plural = 'groups'

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        # Imported locally to avoid import-time URLconf dependencies.
        from django.core.urlresolvers import reverse
        return reverse('groups:detail', kwargs={'slug': self.slug})

    @property
    def admins(self):
        """Members whose membership row carries is_admin=True."""
        return self.members.filter(memberships__is_admin=True)
class GroupMembership(TimestampedModel):
    """Through model for Group.members; records per-user admin status."""
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        related_name='memberships'
    )
    group = models.ForeignKey(
        'Group',
        related_name='memberships'
    )
    # True for group administrators (see Group.admins).
    is_admin = models.BooleanField(
        default=False
    )

    class Meta:
        verbose_name = 'group membership'
        verbose_name_plural = 'group memberships'
| {"/apps/resources/models.py": ["/apps/core/models/__init__.py"], "/apps/events/models.py": ["/apps/core/models/__init__.py", "/apps/groups/models.py"], "/apps/resources/admin.py": ["/apps/resources/models.py"], "/apps/core/views/__init__.py": ["/apps/events/models.py"], "/apps/events/views.py": ["/apps/events/utils.py"], "/apps/groups/models.py": ["/apps/core/models/__init__.py"], "/apps/blogs/models.py": ["/apps/blogs/managers.py"], "/dikultur/urls.py": ["/apps/core/views/__init__.py"], "/apps/groups/views.py": ["/apps/core/models/__init__.py", "/apps/events/models.py", "/apps/events/utils.py"]} |
45,904 | datalogisk-fagraad/dikultur | refs/heads/master | /apps/groups/migrations/0005_auto_20141013_2035.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
    """Auto-generated: add created_at/updated_at timestamps to Group and
    GroupMembership. The hard-coded datetimes are the one-off defaults
    Django asked for when adding non-null columns to existing rows; do
    not edit an applied migration."""

    dependencies = [
        ('groups', '0004_groupmembership_is_admin'),
    ]
    operations = [
        migrations.AddField(
            model_name='group',
            name='created_at',
            field=models.DateTimeField(default=datetime.datetime(2014, 10, 13, 20, 34, 47, 764259), auto_now_add=True),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='group',
            name='updated_at',
            field=models.DateTimeField(default=datetime.datetime(2014, 10, 13, 20, 34, 49, 212749), auto_now=True),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='groupmembership',
            name='created_at',
            field=models.DateTimeField(default=datetime.datetime(2014, 10, 13, 20, 34, 50, 687812), auto_now_add=True),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='groupmembership',
            name='updated_at',
            field=models.DateTimeField(default=datetime.datetime(2014, 10, 13, 20, 34, 52, 44528), auto_now=True),
            preserve_default=False,
        ),
    ]
| {"/apps/resources/models.py": ["/apps/core/models/__init__.py"], "/apps/events/models.py": ["/apps/core/models/__init__.py", "/apps/groups/models.py"], "/apps/resources/admin.py": ["/apps/resources/models.py"], "/apps/core/views/__init__.py": ["/apps/events/models.py"], "/apps/events/views.py": ["/apps/events/utils.py"], "/apps/groups/models.py": ["/apps/core/models/__init__.py"], "/apps/blogs/models.py": ["/apps/blogs/managers.py"], "/dikultur/urls.py": ["/apps/core/views/__init__.py"], "/apps/groups/views.py": ["/apps/core/models/__init__.py", "/apps/events/models.py", "/apps/events/utils.py"]} |
45,905 | datalogisk-fagraad/dikultur | refs/heads/master | /apps/groups/admin.py | from django.contrib import admin
from . import models
class GroupMembershipInline(admin.TabularInline):
    """Edit memberships (and their admin flags) inline on the Group page."""
    model = models.GroupMembership


class GroupAdmin(admin.ModelAdmin):
    """Admin for Group with inline membership editing."""
    inlines = [GroupMembershipInline]


admin.site.register(models.Group, GroupAdmin)
| {"/apps/resources/models.py": ["/apps/core/models/__init__.py"], "/apps/events/models.py": ["/apps/core/models/__init__.py", "/apps/groups/models.py"], "/apps/resources/admin.py": ["/apps/resources/models.py"], "/apps/core/views/__init__.py": ["/apps/events/models.py"], "/apps/events/views.py": ["/apps/events/utils.py"], "/apps/groups/models.py": ["/apps/core/models/__init__.py"], "/apps/blogs/models.py": ["/apps/blogs/managers.py"], "/dikultur/urls.py": ["/apps/core/views/__init__.py"], "/apps/groups/views.py": ["/apps/core/models/__init__.py", "/apps/events/models.py", "/apps/events/utils.py"]} |
45,906 | datalogisk-fagraad/dikultur | refs/heads/master | /apps/resources/migrations/0009_auto_20141013_2035.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
    """Auto-generated: add created_at/updated_at timestamps to all four
    resource models. The hard-coded datetimes are the one-off defaults
    Django asked for when adding non-null columns to existing rows; do
    not edit an applied migration."""

    dependencies = [
        ('resources', '0008_remove_resourcefile_title'),
    ]
    operations = [
        migrations.AddField(
            model_name='resource',
            name='created_at',
            field=models.DateTimeField(default=datetime.datetime(2014, 10, 13, 20, 34, 53, 657540), auto_now_add=True),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='resource',
            name='updated_at',
            field=models.DateTimeField(default=datetime.datetime(2014, 10, 13, 20, 34, 55, 8608), auto_now=True),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='resourcefile',
            name='created_at',
            field=models.DateTimeField(default=datetime.datetime(2014, 10, 13, 20, 34, 56, 242466), auto_now_add=True),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='resourcefile',
            name='updated_at',
            field=models.DateTimeField(default=datetime.datetime(2014, 10, 13, 20, 34, 57, 429585), auto_now=True),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='resourcelink',
            name='created_at',
            field=models.DateTimeField(default=datetime.datetime(2014, 10, 13, 20, 34, 58, 750451), auto_now_add=True),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='resourcelink',
            name='updated_at',
            field=models.DateTimeField(default=datetime.datetime(2014, 10, 13, 20, 35, 0, 524252), auto_now=True),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='resourceupvote',
            name='created_at',
            field=models.DateTimeField(default=datetime.datetime(2014, 10, 13, 20, 35, 2, 95237), auto_now_add=True),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='resourceupvote',
            name='updated_at',
            field=models.DateTimeField(default=datetime.datetime(2014, 10, 13, 20, 35, 3, 625203), auto_now=True),
            preserve_default=False,
        ),
    ]
| {"/apps/resources/models.py": ["/apps/core/models/__init__.py"], "/apps/events/models.py": ["/apps/core/models/__init__.py", "/apps/groups/models.py"], "/apps/resources/admin.py": ["/apps/resources/models.py"], "/apps/core/views/__init__.py": ["/apps/events/models.py"], "/apps/events/views.py": ["/apps/events/utils.py"], "/apps/groups/models.py": ["/apps/core/models/__init__.py"], "/apps/blogs/models.py": ["/apps/blogs/managers.py"], "/dikultur/urls.py": ["/apps/core/views/__init__.py"], "/apps/groups/views.py": ["/apps/core/models/__init__.py", "/apps/events/models.py", "/apps/events/utils.py"]} |
45,907 | datalogisk-fagraad/dikultur | refs/heads/master | /apps/core/templatetags/markdown.py | import re
from django import template
from django.utils.safestring import mark_safe
import CommonMark
register = template.Library()
def convert_http_to_markdown_link(value):
    """Wrap bare http(s) URLs in <...> so CommonMark renders them as links.

    Handles a URL at the very start of the text and URLs preceded by a
    whitespace character; other occurrences are left untouched.

    FIX: the scheme used to be matched with 's*', which also accepted
    bogus schemes like 'httpss://'; 's?' matches exactly http/https.

    NOTE(review): the second substitution replaces the preceding
    whitespace character with a plain space (so a newline before a URL
    becomes a space) — preserved from the original; confirm if intended.
    """
    urlfinder = re.compile(r'^(http(s?):\/\/\S+)')
    urlfinder2 = re.compile(r'\s(http(s?):\/\/\S+)')
    value = urlfinder.sub(r'<\1>', value)
    return urlfinder2.sub(r' <\1>', value)
@register.filter()
def markdown(value):
    """Template filter: render CommonMark markdown (bare URLs auto-linked)
    to HTML marked safe for template output."""
    source = convert_http_to_markdown_link(value)
    ast = CommonMark.DocParser().parse(source)
    html = CommonMark.HTMLRenderer().render(ast)
    return mark_safe(html)
| {"/apps/resources/models.py": ["/apps/core/models/__init__.py"], "/apps/events/models.py": ["/apps/core/models/__init__.py", "/apps/groups/models.py"], "/apps/resources/admin.py": ["/apps/resources/models.py"], "/apps/core/views/__init__.py": ["/apps/events/models.py"], "/apps/events/views.py": ["/apps/events/utils.py"], "/apps/groups/models.py": ["/apps/core/models/__init__.py"], "/apps/blogs/models.py": ["/apps/blogs/managers.py"], "/dikultur/urls.py": ["/apps/core/views/__init__.py"], "/apps/groups/views.py": ["/apps/core/models/__init__.py", "/apps/events/models.py", "/apps/events/utils.py"]} |
45,908 | datalogisk-fagraad/dikultur | refs/heads/master | /apps/events/migrations/0001_initial.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
import django.core.validators
from django.conf import settings
class Migration(migrations.Migration):
    """Auto-generated initial migration for the events app: creates the
    custom User model snapshot, the Event model, and a GroupDetail
    one-to-one extension of auth.Group. Do not edit an applied migration."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('auth', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(verbose_name='last login', default=django.utils.timezone.now)),
                ('is_superuser', models.BooleanField(verbose_name='superuser status', default=False, help_text='Designates that this user has all permissions without explicitly assigning them.')),
                ('username', models.CharField(max_length=30, unique=True, verbose_name='username', help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username.', 'invalid')])),
                ('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)),
                ('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)),
                ('email', models.EmailField(max_length=75, verbose_name='email address', blank=True)),
                ('is_staff', models.BooleanField(verbose_name='staff status', default=False, help_text='Designates whether the user can log into this admin site.')),
                ('is_active', models.BooleanField(verbose_name='active', default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.')),
                ('date_joined', models.DateTimeField(verbose_name='date joined', default=django.utils.timezone.now)),
                ('groups', models.ManyToManyField(to='auth.Group', verbose_name='groups', blank=True)),
                ('user_permissions', models.ManyToManyField(to='auth.Permission', verbose_name='user permissions', blank=True)),
            ],
            options={
                'abstract': False,
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Event',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('title', models.CharField(max_length=255)),
                ('slug', models.CharField(max_length=255, blank=True, null=True)),
                ('place', models.CharField(max_length=255)),
                ('datetime', models.DateTimeField()),
                ('description', models.TextField()),
                ('group', models.ForeignKey(to='auth.Group')),
                ('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'event',
                'verbose_name_plural': 'events',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='GroupDetail',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
                ('description', models.TextField()),
                ('url', models.URLField(blank=True, null=True)),
                ('group', models.OneToOneField(to='auth.Group')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| {"/apps/resources/models.py": ["/apps/core/models/__init__.py"], "/apps/events/models.py": ["/apps/core/models/__init__.py", "/apps/groups/models.py"], "/apps/resources/admin.py": ["/apps/resources/models.py"], "/apps/core/views/__init__.py": ["/apps/events/models.py"], "/apps/events/views.py": ["/apps/events/utils.py"], "/apps/groups/models.py": ["/apps/core/models/__init__.py"], "/apps/blogs/models.py": ["/apps/blogs/managers.py"], "/dikultur/urls.py": ["/apps/core/views/__init__.py"], "/apps/groups/views.py": ["/apps/core/models/__init__.py", "/apps/events/models.py", "/apps/events/utils.py"]} |
45,909 | datalogisk-fagraad/dikultur | refs/heads/master | /apps/blogs/urls.py | from django.conf.urls import patterns, url
from django.core.urlresolvers import reverse_lazy
from django.views.generic import RedirectView
from . import views
# Blog URL routes. Order matters: the literal 'all' index first, then the
# specific post routes, then the blog-detail catch-all, and finally the
# bare root redirecting to the 'all' index.
urlpatterns = patterns(
    '',
    url(r'^(?P<slug>all)$', views.BlogIndex.as_view(), name='all'),
    url(r'^(?P<slug>\S+)/create$', views.PostCreate.as_view(),
        name='post-create'),
    url(r'^(?P<blog_slug>\S+)/(?P<slug>\S+)/update$',
        views.PostUpdate.as_view(), name='post-update'),
    url(r'^(?P<blog_slug>\S+)/(?P<slug>\S+)/delete$',
        views.PostDelete.as_view(), name='post-delete'),
    url(r'^(?P<blog_slug>\S+)/(?P<slug>\S+)$',
        views.PostDetail.as_view(), name='post-detail'),
    url(r'^(?P<slug>\S+)/$', views.BlogDetail.as_view(), name='blog-detail'),
    url(r'^$',
        RedirectView.as_view(url=reverse_lazy('blogs:all',
                                              kwargs={'slug': 'all'})),
        name='root'),
)
| {"/apps/resources/models.py": ["/apps/core/models/__init__.py"], "/apps/events/models.py": ["/apps/core/models/__init__.py", "/apps/groups/models.py"], "/apps/resources/admin.py": ["/apps/resources/models.py"], "/apps/core/views/__init__.py": ["/apps/events/models.py"], "/apps/events/views.py": ["/apps/events/utils.py"], "/apps/groups/models.py": ["/apps/core/models/__init__.py"], "/apps/blogs/models.py": ["/apps/blogs/managers.py"], "/dikultur/urls.py": ["/apps/core/views/__init__.py"], "/apps/groups/views.py": ["/apps/core/models/__init__.py", "/apps/events/models.py", "/apps/events/utils.py"]} |
45,910 | datalogisk-fagraad/dikultur | refs/heads/master | /apps/blogs/models.py | from django.db import models
from django.conf import settings
from django_extensions.db.fields import AutoSlugField
from taggit.managers import TaggableManager
from .managers import PostQuerySet
class Blog(models.Model):
    """A blog with one or more owning users; posts hang off via Post.blog."""
    title = models.CharField(max_length=255)
    # URL identifier derived automatically from title.
    slug = AutoSlugField(populate_from='title')
    description = models.TextField(null=True, blank=True)
    owners = models.ManyToManyField(settings.AUTH_USER_MODEL)

    def __str__(self):
        return self.title
class Post(models.Model):
    """A blog post; default manager is the custom PostQuerySet manager."""
    objects = PostQuerySet.as_manager()
    title = models.CharField(max_length=255)
    # URL identifier derived automatically from title.
    slug = AutoSlugField(populate_from='title')
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    content = models.TextField()
    blog = models.ForeignKey('Blog', related_name='posts')
    tags = TaggableManager(blank=True)
    # Visibility flag; soft delete via is_deleted — presumably filtered by
    # PostQuerySet; confirm in managers.py.
    public = models.BooleanField(default=False)
    is_deleted = models.BooleanField(default=False)

    class Meta:
        verbose_name = 'post'
        verbose_name_plural = 'posts'

    def get_absolute_url(self):
        # Imported locally to avoid import-time URLconf dependencies.
        from django.core.urlresolvers import reverse
        return reverse('blogs:post-detail', kwargs={'blog_slug':self.blog.slug, 'slug': self.slug})

    def __str__(self):
        return self.title
| {"/apps/resources/models.py": ["/apps/core/models/__init__.py"], "/apps/events/models.py": ["/apps/core/models/__init__.py", "/apps/groups/models.py"], "/apps/resources/admin.py": ["/apps/resources/models.py"], "/apps/core/views/__init__.py": ["/apps/events/models.py"], "/apps/events/views.py": ["/apps/events/utils.py"], "/apps/groups/models.py": ["/apps/core/models/__init__.py"], "/apps/blogs/models.py": ["/apps/blogs/managers.py"], "/dikultur/urls.py": ["/apps/core/views/__init__.py"], "/apps/groups/views.py": ["/apps/core/models/__init__.py", "/apps/events/models.py", "/apps/events/utils.py"]} |
45,911 | datalogisk-fagraad/dikultur | refs/heads/master | /apps/events/forms.py | from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Div
from crispy_forms.bootstrap import StrictButton
from . import models
class EventForm(forms.ModelForm):
    """ModelForm for Event with a crispy-forms horizontal layout.

    ``user`` (keyword-only) is the requesting user; when given, the
    'public' flag is shown, and if the user belongs to at least one
    group a 'group' choice limited to those groups is added as well.
    """

    class Meta:
        model = models.Event
        exclude = ['created_at', 'updated_at', 'slug', 'owner']

    def __init__(self, *args, user=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.helper = FormHelper(self)
        self.helper.form_class = 'form-horizontal'
        self.helper.label_class = 'col-sm-2'
        self.helper.field_class = 'col-sm-10'
        self.helper.form_method = 'POST'
        field_list = [
            'title', 'place', 'start', 'end', 'description', 'tags',
        ]
        if user:
            # FIX: exists() issues a cheap EXISTS query instead of
            # fetching every group just to count it with len().
            if user.group_set.exists():
                self.fields['group'] = forms.ModelChoiceField(
                    queryset=user.group_set.all(),
                    required=False,
                )
                field_list.append('group')
            field_list.append('public')
        field_list.append(
            Div(
                StrictButton('Save', type='submit', css_class='btn-primary'),
                css_class="text-right"
            )
        )
        self.helper.layout = Layout(
            *field_list
        )

    def clean(self):
        """Cross-field validation: end must not precede start."""
        cleaned_data = super().clean()
        start = cleaned_data.get('start')
        end = cleaned_data.get('end')
        # FIX: guard on start too — if the 'start' field failed its own
        # validation it is absent here, and `end < None` raises TypeError.
        if start and end and end < start:
            self.add_error('start', 'Start cannot be after end.')
            self.add_error('end', 'End cannot be before start.')
        return cleaned_data
| {"/apps/resources/models.py": ["/apps/core/models/__init__.py"], "/apps/events/models.py": ["/apps/core/models/__init__.py", "/apps/groups/models.py"], "/apps/resources/admin.py": ["/apps/resources/models.py"], "/apps/core/views/__init__.py": ["/apps/events/models.py"], "/apps/events/views.py": ["/apps/events/utils.py"], "/apps/groups/models.py": ["/apps/core/models/__init__.py"], "/apps/blogs/models.py": ["/apps/blogs/managers.py"], "/dikultur/urls.py": ["/apps/core/views/__init__.py"], "/apps/groups/views.py": ["/apps/core/models/__init__.py", "/apps/events/models.py", "/apps/events/utils.py"]} |
45,912 | datalogisk-fagraad/dikultur | refs/heads/master | /apps/resources/forms.py | from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Div
from crispy_forms.bootstrap import StrictButton
from . import models
class ResourceForm(forms.ModelForm):
    """ModelForm for Resource; bookkeeping fields and owner are set in the view."""
    class Meta:
        model = models.Resource
        exclude = ['created_at', 'updated_at', 'slug', 'owner']
| {"/apps/resources/models.py": ["/apps/core/models/__init__.py"], "/apps/events/models.py": ["/apps/core/models/__init__.py", "/apps/groups/models.py"], "/apps/resources/admin.py": ["/apps/resources/models.py"], "/apps/core/views/__init__.py": ["/apps/events/models.py"], "/apps/events/views.py": ["/apps/events/utils.py"], "/apps/groups/models.py": ["/apps/core/models/__init__.py"], "/apps/blogs/models.py": ["/apps/blogs/managers.py"], "/dikultur/urls.py": ["/apps/core/views/__init__.py"], "/apps/groups/views.py": ["/apps/core/models/__init__.py", "/apps/events/models.py", "/apps/events/utils.py"]} |
45,913 | datalogisk-fagraad/dikultur | refs/heads/master | /dikultur/settings/base.py | import os
import environ
# Base Django settings; secrets and deploy-specific values come from the
# environment via django-environ (.env file or real env vars).
env = environ.Env(DEBUG=(bool, False),)
environ.Env.read_env()
# Project root = two levels up from this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# NOTE(review): PEP 8 (E731) prefers a def over a named lambda.
get_path = lambda x: os.path.join(BASE_DIR, x)
SECRET_KEY = env('SECRET_KEY')
DEBUG = env('DEBUG')
TEMPLATE_DEBUG = DEBUG
# Comma-separated in the environment, e.g. "example.com,www.example.com".
ALLOWED_HOSTS = env('ALLOWED_HOSTS').split(',')
# Custom user model lives in apps.core.
AUTH_USER_MODEL = 'core.User'
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites',
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
    'allauth.socialaccount.providers.github',
    'allauth.socialaccount.providers.facebook',
    'opbeat.contrib.django',
    'crispy_forms',
    'sekizai',
    'taggit',
    'apps.groups',
    'apps.core',
    'apps.resources',
    'apps.events',
    'apps.blogs',
)
TEMPLATE_CONTEXT_PROCESSORS = (
    "django.contrib.auth.context_processors.auth",
    "django.core.context_processors.debug",
    "django.core.context_processors.i18n",
    "django.core.context_processors.media",
    "django.core.context_processors.static",
    "django.core.context_processors.tz",
    "django.core.context_processors.request",
    "django.contrib.messages.context_processors.messages",
    'sekizai.context_processors.sekizai',
    "allauth.account.context_processors.account",
    "allauth.socialaccount.context_processors.socialaccount",
)
MIDDLEWARE_CLASSES = (
    'opbeat.contrib.django.middleware.OpbeatAPMMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
AUTHENTICATION_BACKENDS = (
    "django.contrib.auth.backends.ModelBackend",
    "allauth.account.auth_backends.AuthenticationBackend",
)
SITE_ID = 1
ROOT_URLCONF = 'dikultur.urls'
WSGI_APPLICATION = 'dikultur.wsgi.application'
# Parsed from DATABASE_URL by django-environ.
DATABASES = {
    'default': env.db(),
}
# Outgoing mail configuration, all from the environment.
EMAIL_HOST = env('EMAIL_HOST')
EMAIL_PORT = env('EMAIL_PORT')
EMAIL_HOST_USER = env('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = env('EMAIL_HOST_PASSWORD')
LANGUAGE_CODE = env('LANGUAGE_CODE')
TIME_ZONE = env('TIME_ZONE')
USE_I18N = True
USE_L10N = True
USE_TZ = True
MEDIA_URL = '/media/'
MEDIA_ROOT = get_path('media')
STATIC_URL = '/static/'
STATIC_ROOT = get_path('static')
TEMPLATE_DIRS = (
    get_path('templates'),
)
CRISPY_TEMPLATE_PACK = 'bootstrap3'
ACCOUNT_USER_MODEL_USERNAME_FIELD = 'username'
ACCOUNT_USER_MODEL_EMAIL_FIELD = 'email'
# Opbeat APM credentials.
OPBEAT = {
    "ORGANIZATION_ID": env('OPBEAT_ORGANIZATION_ID'),
    "APP_ID": env('OPBEAT_APP_ID'),
    "SECRET_TOKEN": env('OPBEAT_SECRET_TOKEN'),
}
45,914 | datalogisk-fagraad/dikultur | refs/heads/master | /apps/groups/forms.py | from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Div
from crispy_forms.bootstrap import StrictButton
from . import models
class GroupForm(forms.ModelForm):
    """ModelForm for Group with a crispy-forms horizontal layout.

    ``user`` (keyword-only) is the creator; on first save the user is
    made an admin member of the new group.
    """

    class Meta:
        model = models.Group
        exclude = ['created_at', 'updated_at', 'slug', 'members']

    def __init__(self, *args, user=None, **kwargs):
        self.user = user
        super().__init__(*args, **kwargs)
        self.helper = FormHelper(self)
        self.helper.form_class = 'form-horizontal'
        self.helper.label_class = 'col-sm-2'
        self.helper.field_class = 'col-sm-10'
        self.helper.form_method = 'POST'
        field_list = [
            'name',
            'description',
            'website',
            'email',
            'mailinglist_signup',
            'is_open',
            Div(
                StrictButton('Save', type='submit', css_class='btn-primary'),
                css_class="text-right"
            )
        ]
        self.helper.layout = Layout(
            *field_list
        )

    def save(self, commit=True):
        """Save the group; on creation, add self.user as its first admin."""
        # FIX(idiom): the pk test is already a boolean — no ternary needed.
        created = self.instance.pk is None
        instance = super().save(commit)
        if created:
            # NOTE(review): with commit=False the instance is unsaved here,
            # and self.user may be None when the form was built without a
            # user — both would fail; presumably callers always pass
            # commit=True and a user. Confirm.
            models.GroupMembership.objects.create(
                group=instance,
                user=self.user,
                is_admin=True,
            )
        return instance
| {"/apps/resources/models.py": ["/apps/core/models/__init__.py"], "/apps/events/models.py": ["/apps/core/models/__init__.py", "/apps/groups/models.py"], "/apps/resources/admin.py": ["/apps/resources/models.py"], "/apps/core/views/__init__.py": ["/apps/events/models.py"], "/apps/events/views.py": ["/apps/events/utils.py"], "/apps/groups/models.py": ["/apps/core/models/__init__.py"], "/apps/blogs/models.py": ["/apps/blogs/managers.py"], "/dikultur/urls.py": ["/apps/core/views/__init__.py"], "/apps/groups/views.py": ["/apps/core/models/__init__.py", "/apps/events/models.py", "/apps/events/utils.py"]} |
45,915 | datalogisk-fagraad/dikultur | refs/heads/master | /apps/core/migrations/0003_auto_20140928_2043.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated: rename core.GroupDetail to GroupDetails by creating
    the new model and dropping the old one. Do not edit an applied
    migration."""

    dependencies = [
        ('auth', '0001_initial'),
        ('core', '0002_auto_20140824_1039'),
    ]
    operations = [
        migrations.CreateModel(
            name='GroupDetails',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
                ('description', models.TextField()),
                ('url', models.URLField(null=True, blank=True)),
                ('group', models.OneToOneField(to='auth.Group')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.RemoveField(
            model_name='groupdetail',
            name='group',
        ),
        migrations.DeleteModel(
            name='GroupDetail',
        ),
    ]
| {"/apps/resources/models.py": ["/apps/core/models/__init__.py"], "/apps/events/models.py": ["/apps/core/models/__init__.py", "/apps/groups/models.py"], "/apps/resources/admin.py": ["/apps/resources/models.py"], "/apps/core/views/__init__.py": ["/apps/events/models.py"], "/apps/events/views.py": ["/apps/events/utils.py"], "/apps/groups/models.py": ["/apps/core/models/__init__.py"], "/apps/blogs/models.py": ["/apps/blogs/managers.py"], "/dikultur/urls.py": ["/apps/core/views/__init__.py"], "/apps/groups/views.py": ["/apps/core/models/__init__.py", "/apps/events/models.py", "/apps/events/utils.py"]} |
45,916 | datalogisk-fagraad/dikultur | refs/heads/master | /apps/events/templatetags/events_tags.py | from django import template
register = template.Library()
@register.inclusion_tag('events/includes/event_list.html')
def event_list(events, profile_view=False):
    """Render the shared event-list include for the given events.

    ``profile_view`` toggles profile-specific presentation in the template.
    """
    context = {
        'events': events,
        'profile_view': profile_view,
    }
    return context
45,917 | datalogisk-fagraad/dikultur | refs/heads/master | /dikultur/urls.py | from django.conf import settings
from django.conf.urls import patterns, include, url
from django.contrib import admin
from apps.core.views import FrontPage, ProfileView, Logout, Login
# Site URL configuration: admin, account views (custom login/logout/profile
# registered before the allauth catch-all so they take precedence), the four
# namespaced apps, and the front page.
urlpatterns = patterns(
    '',
    url(r'^admin/', include(admin.site.urls)),
    url(r'^accounts/profile/$', ProfileView.as_view(), name='profile'),
    url(r'^accounts/logout/$', Logout.as_view(), name='account_logout'),
    url(r'^accounts/login/$', Login.as_view(), name='account_login'),
    url(r'^accounts/', include('allauth.urls')),
    url(r'^resources/', include('apps.resources.urls',
                                namespace='resources',
                                app_name='resources')),
    url(r'^events/', include('apps.events.urls',
                             namespace='events',
                             app_name='events')),
    url(r'^groups/', include('apps.groups.urls',
                             namespace='groups',
                             app_name='groups')),
    url(r'^blogs/', include('apps.blogs.urls',
                            namespace='blogs',
                            app_name='blogs')),
    url(r'^$', FrontPage.as_view(), name='frontpage'),
)

# In development only, let Django itself serve static and media files.
if settings.DEBUG:
    urlpatterns += patterns(
        '',
        url(r'^static/(?P<path>.*)$', 'django.views.static.serve',
            {'document_root': settings.STATIC_ROOT}),
        url(r'^media/(?P<path>.*)$', 'django.views.static.serve',
            {'document_root': settings.MEDIA_ROOT}),
    )
| {"/apps/resources/models.py": ["/apps/core/models/__init__.py"], "/apps/events/models.py": ["/apps/core/models/__init__.py", "/apps/groups/models.py"], "/apps/resources/admin.py": ["/apps/resources/models.py"], "/apps/core/views/__init__.py": ["/apps/events/models.py"], "/apps/events/views.py": ["/apps/events/utils.py"], "/apps/groups/models.py": ["/apps/core/models/__init__.py"], "/apps/blogs/models.py": ["/apps/blogs/managers.py"], "/dikultur/urls.py": ["/apps/core/views/__init__.py"], "/apps/groups/views.py": ["/apps/core/models/__init__.py", "/apps/events/models.py", "/apps/events/utils.py"]} |
45,918 | datalogisk-fagraad/dikultur | refs/heads/master | /apps/groups/views.py | from django.http import HttpResponse, HttpResponseRedirect
from django.utils import timezone
from django.views.generic import ListView, DetailView, View, UpdateView, \
CreateView
from django.core.urlresolvers import reverse
from . import models, forms
from apps.core.models import User
from ..core.views.mixins import LoginRequiredMixin
from ..events.models import Event
from ..events.utils import generate_ical
class GroupAdminRequiredMixin(LoginRequiredMixin):
    """Restrict a view to admins of the group named by the ``slug`` URL kwarg.

    Non-admins are redirected to the group's detail page.  The looked-up
    group is cached on ``self.obj`` so subclasses (e.g. GroupMakeAdmin)
    can reuse it without a second query.
    """

    def dispatch(self, request, *args, **kwargs):
        # Fetch the group only once per request.
        if not hasattr(self, 'obj'):
            self.obj = models.Group.objects.get(slug=kwargs.get('slug'))
        # ``admins`` is defined on the Group model (not visible here);
        # presumably it yields an iterable of users — confirm on the model.
        if request.user not in self.obj.admins:
            return HttpResponseRedirect(self.obj.get_absolute_url())
        return super().dispatch(request, *args, **kwargs)
class GroupList(ListView):
    """List all groups, ordered alphabetically by name."""

    template_name = 'groups/group_list.html'
    context_object_name = 'groups'
    queryset = models.Group.objects.all().order_by('name')
class GroupDetail(DetailView):
    """Show one group together with its upcoming public events."""

    template_name = 'groups/group_detail.html'
    context_object_name = 'group'
    model = models.Group

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Only events that are public and still in the future.
        upcoming = Event.objects.filter(
            group=self.object,
            public=True,
            start__gt=timezone.now(),
        )
        context['future_events'] = upcoming
        return context
class GroupCreate(LoginRequiredMixin, CreateView):
    """Create a new group; the requesting user is handed to the form."""

    template_name = 'groups/group_form.html'
    model = models.Group
    form_class = forms.GroupForm

    def get_form_kwargs(self):
        form_kwargs = super().get_form_kwargs()
        form_kwargs.update(user=self.request.user)
        return form_kwargs
class GroupUpdate(GroupAdminRequiredMixin, UpdateView):
    """Edit a group; restricted to group admins by the mixin.

    NOTE(review): unlike GroupCreate, no ``user`` kwarg is passed to
    GroupForm here — confirm the form tolerates its absence.
    """

    template_name = 'groups/group_form.html'
    model = models.Group
    form_class = forms.GroupForm
class GroupMembers(LoginRequiredMixin, DetailView):
    """Show the member list of a group (login required)."""

    template_name = 'groups/group_members.html'
    model = models.Group
class GroupJoin(LoginRequiredMixin, View):
    """Add the current user as a member of the group, then redirect to it."""

    def get(self, *args, **kwargs):
        group = models.Group.objects.get(slug=kwargs.get('slug'))
        models.GroupMembership.objects.create(
            user=self.request.user,
            group=group,
        )
        return HttpResponseRedirect(group.get_absolute_url())
class GroupMakeAdmin(GroupAdminRequiredMixin, View):
    """Promote the member given by the ``user_id`` URL kwarg to group admin."""

    def get(self, *args, **kwargs):
        member = User.objects.get(pk=kwargs.get('user_id'))
        membership = models.GroupMembership.objects.get(
            user=member, group=self.obj)
        membership.is_admin = True
        membership.save()
        members_url = reverse('groups:members', kwargs={'slug': self.obj.slug})
        return HttpResponseRedirect(members_url)
class GroupRemoveAdmin(GroupAdminRequiredMixin, View):
    """Revoke admin status for the member given by the ``user_id`` URL kwarg."""

    def get(self, *args, **kwargs):
        member = User.objects.get(pk=kwargs.get('user_id'))
        membership = models.GroupMembership.objects.get(
            user=member, group=self.obj)
        membership.is_admin = False
        membership.save()
        members_url = reverse('groups:members', kwargs={'slug': self.obj.slug})
        return HttpResponseRedirect(members_url)
class GroupICal(View):
    """Serve an iCal feed of the group's events."""

    def get(self, *args, **kwargs):
        calendar = generate_ical(group__slug=kwargs.get('slug'))
        return HttpResponse(content=calendar.to_ical())
| {"/apps/resources/models.py": ["/apps/core/models/__init__.py"], "/apps/events/models.py": ["/apps/core/models/__init__.py", "/apps/groups/models.py"], "/apps/resources/admin.py": ["/apps/resources/models.py"], "/apps/core/views/__init__.py": ["/apps/events/models.py"], "/apps/events/views.py": ["/apps/events/utils.py"], "/apps/groups/models.py": ["/apps/core/models/__init__.py"], "/apps/blogs/models.py": ["/apps/blogs/managers.py"], "/dikultur/urls.py": ["/apps/core/views/__init__.py"], "/apps/groups/views.py": ["/apps/core/models/__init__.py", "/apps/events/models.py", "/apps/events/utils.py"]} |
45,919 | datalogisk-fagraad/dikultur | refs/heads/master | /apps/blogs/views.py | from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.views.generic import ListView, DetailView, CreateView, UpdateView, View, DeleteView
from django.core.urlresolvers import reverse_lazy
from django.utils import timezone
from . import models, forms
class BlogList(ListView):
    """List all blogs, ordered by title."""

    template_name = 'blogs/blog_list.html'
    queryset = models.Blog.objects.order_by('title')
class BlogDetail(DetailView):
    """Show a single blog."""

    template_name = 'blogs/blog_detail.html'
    context_object_name = 'blog'
    model = models.Blog
class PostCreate(CreateView):
    """Create a post on the blog identified by the ``slug`` URL kwarg.

    Only blog owners may load the form; others get a plain 'Access denied'
    response.  NOTE(review): the ownership check runs only in ``get`` — a
    direct POST from a non-owner is not blocked here; confirm this is
    intended.
    """

    template_name = 'blogs/post_form.html'
    model = models.Post
    form_class = forms.PostForm

    def dispatch(self, request, *args, **kwargs):
        # Resolve the parent blog before any handler runs.
        slug = kwargs.get('slug', None)
        self.blog = models.Blog.objects.get(slug=slug)
        return super().dispatch(request, *args, **kwargs)

    def get(self, request, *args, **kwargs):
        # Ownership gate for displaying the form.
        user = self.request.user
        if user not in self.blog.owners.all():
            return HttpResponse('Access denied')
        return super().get(request, *args, **kwargs)

    def form_valid(self, form):
        # Attach the new post to the blog taken from the URL.
        form.instance.blog = self.blog
        return super().form_valid(form)

    def get_form_kwargs(self):
        # Hand the blog to PostForm (which accepts a ``blog`` kwarg).
        kwargs = super().get_form_kwargs()
        kwargs['blog'] = self.blog
        return kwargs
class PostDetail(DetailView):
    """Show a single blog post."""

    template_name = 'blogs/post_detail.html'
    context_object_name = 'post'
    model = models.Post
class PostUpdate(UpdateView):
    """Edit an existing post on the blog identified by ``blog_slug``.

    Mirrors PostCreate: only blog owners may load the edit form.
    NOTE(review): as in PostCreate, the ownership check runs only in
    ``get``; a direct POST from a non-owner is not blocked — confirm this
    is intended.
    """

    template_name = 'blogs/post_form.html'
    model = models.Post
    form_class = forms.PostForm

    def dispatch(self, request, *args, **kwargs):
        # Resolve the parent blog before any handler runs.
        slug = kwargs.get('blog_slug', None)
        self.blog = models.Blog.objects.get(slug=slug)
        return super().dispatch(request, *args, **kwargs)

    def get(self, request, *args, **kwargs):
        user = self.request.user
        # Idiomatic membership test (was ``not user in ...``); now matches PostCreate.
        if user not in self.blog.owners.all():
            return HttpResponse('Access denied')
        return super().get(request, *args, **kwargs)
class PostDelete(DeleteView):
    """Soft-delete a post: set ``is_deleted`` instead of removing the row."""

    model = models.Post
    context_object_name = 'post'

    def delete(self, request, *args, **kwargs):
        post = self.get_object()
        # Flag the row rather than deleting it.
        post.is_deleted = True
        post.save()
        redirect_to = reverse_lazy(
            'blogs:blog-detail',
            kwargs={'slug': post.blog.slug},
        )
        return HttpResponseRedirect(redirect_to)
class BlogIndex(ListView):
    """List all public posts, newest first.

    NOTE(review): ``self.slug`` is captured from the URL but never used to
    filter the queryset, so every public post is listed regardless of blog.
    Confirm whether per-blog filtering was intended.
    """

    template_name = 'blogs/blog_index.html'
    context_object_name = 'posts'

    def dispatch(self, request, *args, **kwargs):
        self.slug = kwargs.get('slug', None)
        return super().dispatch(request, *args, **kwargs)

    def get_queryset(self, **kwargs):
        # Removed a leftover debug print() of the queryset.
        return models.Post.objects.public().order_by('-created_at')
| {"/apps/resources/models.py": ["/apps/core/models/__init__.py"], "/apps/events/models.py": ["/apps/core/models/__init__.py", "/apps/groups/models.py"], "/apps/resources/admin.py": ["/apps/resources/models.py"], "/apps/core/views/__init__.py": ["/apps/events/models.py"], "/apps/events/views.py": ["/apps/events/utils.py"], "/apps/groups/models.py": ["/apps/core/models/__init__.py"], "/apps/blogs/models.py": ["/apps/blogs/managers.py"], "/dikultur/urls.py": ["/apps/core/views/__init__.py"], "/apps/groups/views.py": ["/apps/core/models/__init__.py", "/apps/events/models.py", "/apps/events/utils.py"]} |
45,920 | datalogisk-fagraad/dikultur | refs/heads/master | /apps/blogs/forms.py | from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Div
from crispy_forms.bootstrap import StrictButton
from . import models
class PostForm(forms.ModelForm):
    """Crispy-forms ModelForm for creating/editing a blog Post."""

    class Meta:
        model = models.Post
        # created_at/updated_at/slug are managed automatically; blog is set
        # by the view (PostCreate.form_valid).
        exclude = ['created_at', 'updated_at', 'slug', 'blog', ]

    def __init__(self, *args, blog=None, **kwargs):
        # ``blog`` is accepted (PostCreate passes it via get_form_kwargs) but
        # unused here — presumably reserved for per-blog behaviour; confirm.
        super().__init__(*args, **kwargs)
        # Bootstrap horizontal-form layout.
        self.helper = FormHelper(self)
        self.helper.form_class = 'form-horizontal'
        self.helper.label_class = 'col-sm-2'
        self.helper.field_class = 'col-sm-10'
        self.helper.form_method = 'POST'
        field_list = [
            'title', 'content', 'tags', 'public'
        ]
        # Right-aligned submit button rendered after the fields.
        field_list.append(
            Div(
                StrictButton('Save', type='submit', css_class='btn-primary'),
                css_class="text-right"
            )
        )
        self.helper.layout = Layout(
            *field_list
        )
| {"/apps/resources/models.py": ["/apps/core/models/__init__.py"], "/apps/events/models.py": ["/apps/core/models/__init__.py", "/apps/groups/models.py"], "/apps/resources/admin.py": ["/apps/resources/models.py"], "/apps/core/views/__init__.py": ["/apps/events/models.py"], "/apps/events/views.py": ["/apps/events/utils.py"], "/apps/groups/models.py": ["/apps/core/models/__init__.py"], "/apps/blogs/models.py": ["/apps/blogs/managers.py"], "/dikultur/urls.py": ["/apps/core/views/__init__.py"], "/apps/groups/views.py": ["/apps/core/models/__init__.py", "/apps/events/models.py", "/apps/events/utils.py"]} |
45,921 | datalogisk-fagraad/dikultur | refs/heads/master | /apps/core/models/__init__.py | from .abstract import *
from django.contrib.auth import models as auth_models
class User(auth_models.AbstractUser):
    """Project user model.

    Placeholder subclass of Django's AbstractUser so migrations won't break
    if a custom user is needed later; adds no fields or behaviour.
    """
    pass
| {"/apps/resources/models.py": ["/apps/core/models/__init__.py"], "/apps/events/models.py": ["/apps/core/models/__init__.py", "/apps/groups/models.py"], "/apps/resources/admin.py": ["/apps/resources/models.py"], "/apps/core/views/__init__.py": ["/apps/events/models.py"], "/apps/events/views.py": ["/apps/events/utils.py"], "/apps/groups/models.py": ["/apps/core/models/__init__.py"], "/apps/blogs/models.py": ["/apps/blogs/managers.py"], "/dikultur/urls.py": ["/apps/core/views/__init__.py"], "/apps/groups/views.py": ["/apps/core/models/__init__.py", "/apps/events/models.py", "/apps/events/utils.py"]} |
45,922 | datalogisk-fagraad/dikultur | refs/heads/master | /apps/resources/migrations/0005_auto_20140824_1235.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import taggit.managers
class Migration(migrations.Migration):
    """Make Resource.tags optional (blank=True) on the taggit manager."""

    dependencies = [
        ('resources', '0004_auto_20140824_1039'),
    ]

    operations = [
        migrations.AlterField(
            model_name='resource',
            name='tags',
            field=taggit.managers.TaggableManager(through='taggit.TaggedItem', verbose_name='Tags', to='taggit.Tag', blank=True, help_text='A comma-separated list of tags.'),
        ),
    ]
| {"/apps/resources/models.py": ["/apps/core/models/__init__.py"], "/apps/events/models.py": ["/apps/core/models/__init__.py", "/apps/groups/models.py"], "/apps/resources/admin.py": ["/apps/resources/models.py"], "/apps/core/views/__init__.py": ["/apps/events/models.py"], "/apps/events/views.py": ["/apps/events/utils.py"], "/apps/groups/models.py": ["/apps/core/models/__init__.py"], "/apps/blogs/models.py": ["/apps/blogs/managers.py"], "/dikultur/urls.py": ["/apps/core/views/__init__.py"], "/apps/groups/views.py": ["/apps/core/models/__init__.py", "/apps/events/models.py", "/apps/events/utils.py"]} |
45,939 | MAHines/CanvasMAH | refs/heads/main | /Scripts/uploadAssignmentOverrides.py | import mahCanvas
from datetime import datetime, date
import argparse
def main():
    """Parse the earliest due date from the CLI and push assignment overrides."""
    arg_parser = argparse.ArgumentParser(
        description="Set due dates for multiple sections/students in Canvas assignment")
    arg_parser.add_argument('firstDate', type=str,
                            help='Earliest due date for assignment. (e.g., 4/1/2021)')
    parsed = arg_parser.parse_args()

    canvas = mahCanvas.mahCanvas()
    canvas.uploadAssignmentOverrides(parsed.firstDate, overwrite=True, onlyThisTerm=True)


if __name__ == '__main__':
    main()
| {"/Scripts/uploadAssignmentOverrides.py": ["/mahCanvas.py"], "/Scripts/downloadStudentList.py": ["/mahCanvas.py"]} |
45,940 | MAHines/CanvasMAH | refs/heads/main | /Scripts/downloadStudentList.py | import mahCanvas
def main():
    """Download the student roster for an interactively chosen current-term course."""
    canvas = mahCanvas.mahCanvas()
    canvas.downloadStudentList(onlyThisTerm=True)


if __name__ == '__main__':
    main()
| {"/Scripts/uploadAssignmentOverrides.py": ["/mahCanvas.py"], "/Scripts/downloadStudentList.py": ["/mahCanvas.py"]} |
45,941 | MAHines/CanvasMAH | refs/heads/main | /WatermarkReports/WatermarkReports.py | # Usage: python WatermarkReports.py gradebook.csv submissionsFolder
# This script prepares lab report submissions downloaded from Canvas for Gradescope upload.
# – Converts any .docx files to .pdf
# - Cleans all pdf's using mutool (scanned pdfs are particularly problematic)
# – Finds the length of the longest pdf
# – Gets student ID from Canvas filename, matches to student name in gradebook, and adds student name at top of first page
# – Adds vertical page numbers to both sides of each page to help Gradescope auto-assign pages.
# – Adjusts all pdf's to the same number of pages to help Gradescope auto-assign pages.
# – Produces file Outline.pdf which is used for preparing assignment in Gradescope
#
# This script requires the installation of mutool, a free command line pdf tool, using
# brew install mupdf-tools
# If you need to install brew first, follow the instructions under "Install Homebrew" at
# https://brew.sh
#
# This script requires the file 'Watermark.pdf' to be in the working directory. This file consists
# of a _scanned_ set of pages with numbers running down the right and left hand sides. It is
# important that this be a _scanned_ file for Gradescope matching. A pdf file with text does not work.
# I tried just having the numbers run down only the right hand side, but Gradescope would not auto-recognize
# this type of watermarking.
#
# This script assumes that the files to be processed are in the directory passed as the argument
# submissionsFolder. The script will only handle .docx and .pdf files. Any other files will
# be deleted with no warning.
#
# Some pdf files do not watermark properly. These files appear to have an opaque white background behind
# the text. A scanned file would probably not watermark correctly either.
from docx2pdf import convert
from PyPDF2 import PdfFileReader, PdfFileWriter, PdfFileMerger
from fpdf import FPDF
import os
import pandas as pd
import sys
import argparse
import subprocess
import glob
def main():
    """Prepare Canvas report submissions for Gradescope upload.

    Converts .docx submissions to PDF, cleans every PDF with the external
    ``mutool`` tool, stamps each report with the student's name (looked up in
    the Canvas gradebook by the student ID embedded in the filename) plus
    page-number watermarks, pads all reports to a common page count, and
    writes Outline.pdf for building the Gradescope assignment.  Files other
    than .docx/.pdf in the submissions folder are deleted without warning.
    """
    # Read in the arguments and validate
    parser = argparse.ArgumentParser(description="Prepare a folder of downloads from Canvas for upload to Gradescope")
    parser.add_argument('gradesCSV', type = str, help = 'Path to gradebook in csv format')
    parser.add_argument('subFolder', type = str, help = 'Path to folder of Canvas submissions')
    args = parser.parse_args()
    gradesCSV = args.gradesCSV
    subFolder = args.subFolder
    if not os.path.isfile(gradesCSV):
        print(f'ERROR: The gradebook file {gradesCSV} does not exist.')
        exit()
    gradebookName, gradebookExtension = os.path.splitext(gradesCSV)
    if not gradebookExtension == '.csv':
        print(f'ERROR: The gradebook file {gradesCSV} should be a .csv file.')
        exit()
    if not os.path.isdir(subFolder):
        print(f'ERROR: {subFolder} is not a valid directory.')
        exit()
    if not os.path.isfile('Watermark.pdf'):
        print(f'ERROR: The file Watermark.pdf is not in the current directory.')
        exit()

    # Canvas gradebook csv's can have a variable number of header rows. Open the gradebook,
    # count the number of lines that do not start with ", then close the gradebook.
    gradebookFile = open(gradesCSV, 'r')
    text = gradebookFile.readlines()
    cnt = 0;
    for theLine in (text):
        if theLine[0] == "\"":
            break
        else:
            cnt += 1
    gradebookFile.close()

    # Read in the info in the gradebook so we can get names from IDs. Should probably add error checking.
    # Columns 0 and 1 are assumed to be sortable name and Canvas student ID.
    df = pd.read_csv(gradesCSV, usecols=[0,1], header=cnt-1)
    df.columns = ['Name', 'ID']

    # Open the Watermark file. This pdf contains pages with numbers running down both sides.
    wm_file = open('Watermark.pdf', 'rb')

    # Test for word files before firing up Word (docx2pdf drives Word, which is slow to launch).
    cwd = os.getcwd()
    os.chdir(subFolder)
    types = ('*.doc', '*.docx')
    wordFiles = []
    for files in types:
        wordFiles.extend(glob.glob(files))
    if len(wordFiles) > 0:
        os.chdir(cwd)
        convert(subFolder) # Converts Word files to pdf
        os.chdir(subFolder)

    # "Clean" all of the pdfs using mutool, outputting to a temporary '<name>.pdf_out' file.
    # Anything that is not a pdf is deleted here.
    print("Cleaning pdfs.")
    for fn in os.listdir():
        if not fn.endswith('.pdf'):
            os.remove(fn)
        else:
            fn_out = fn + '_out'
            result = subprocess.run(["mutool", "clean", "-s", "-g", fn, fn_out])

    # Delete all of the unclean files, leaving only the '_out' copies.
    for fn in os.listdir():
        if fn.endswith('.pdf'):
            os.remove(fn)

    # Now reclean all of the files using mutool, outputting to the original file name
    # (fn[:-4] strips the '_out' suffix).
    # This may no longer be necessary, but it is very fast, so what the hey…
    print("Recleaning pdfs.")
    for fn in os.listdir():
        fn_out = fn[:-4]
        result = subprocess.run(["mutool", "clean", "-s", "-g", fn, fn_out])

    # Remove the intermediate '_out' files; the reclean step above already wrote
    # the cleaned content back under the original names.
    for fn in os.listdir():
        if fn.endswith('_out'):
            os.remove(fn)

    # Find the maximum number of pages across all files and delete non pdf leftovers.
    maxPages = 0
    for fn in os.listdir():
        if not fn.endswith('.pdf'):
            os.remove(fn)
        else:
            with open(fn, "rb") as origReport_file:
                origReport_reader = PdfFileReader(origReport_file, strict = False)
                numPages = origReport_reader.getNumPages()
                if numPages > maxPages:
                    maxPages = numPages

    # Now make the Outline.pdf, which consists of watermarked pages. We are going to have a problem
    # with files that are longer than the watermark file. My solution is just not to watermark
    # the excess pages. This may cause Gradescope to get confused, but I doubt it.
    wm_reader = PdfFileReader(wm_file)
    if maxPages > wm_reader.getNumPages():
        maxPages = wm_reader.getNumPages()
    if maxPages > 24: # For unknown reasons, Gradescope does not like more than 24 pages using this approach
        maxPages = 24
    print(f'All reports will be lengthened to {maxPages} pages.')
    os.chdir(cwd)
    with open('Outline.pdf', 'wb') as outline_file:
        outline_writer = PdfFileWriter()
        for i in range(maxPages):
            outline_writer.addPage(wm_reader.getPage(i))
        outline_writer.write(outline_file)
        outline_file.close()
    os.chdir(subFolder)

    # Loop through every file in the submissionsFolder directory, processing each.
    for fn in os.listdir():
        if fn.endswith('.pdf'):
            # Find student ID from Canvas filename: first all-numeric '_'-separated chunk.
            fnParts = fn.split('_')
            i = 0
            while not fnParts[i].isnumeric():
                i += 1
            studentID = int(fnParts[i])
            # Extract name from database
            if len(df.loc[df['ID']==studentID]) > 0:
                # Gradebook names are 'Last, First'; reorder to 'First Last'.
                nameParts = df.iloc[df.loc[df['ID']==studentID].index.values[0],0].split(',')
                fullName = nameParts[1] + ' ' + nameParts[0]
                print('Processing ' + fullName + '…')
                # Make cover page 'cover.pdf' with the student's name in the upper left hand corner.
                # Arial bold seemed to have the best OCR of the fonts readily available to FPDF.
                coverPDF = FPDF('P', 'mm', 'Letter')
                coverPDF.add_page()
                coverPDF.set_font("Arial", style = 'B',size = 16)
                coverPDF.cell(0, 0, fullName,ln = 1, align = 'L')
                coverPDF.output('cover.pdf')
                # Open the output file 'Output.pdf' then start processing
                with open('Output.pdf', 'wb') as output_file:
                    outReport_writer = PdfFileWriter()
                    wm_reader = PdfFileReader(wm_file)
                    # Merge each report page onto the corresponding watermark page; the
                    # cover (name) page is additionally merged onto page 1. Reports longer
                    # than maxPages get their excess pages appended unwatermarked.
                    with open('cover.pdf', 'rb') as cover_file:
                        cover_reader = PdfFileReader(cover_file)
                        with open(fn, 'rb') as origReport_file:
                            origReport_reader = PdfFileReader(origReport_file, strict = False)
                            origPages = origReport_reader.getNumPages()
                            for i in range(maxPages):
                                pdf_page = wm_reader.getPage(i)
                                if i < origPages:
                                    pdf_page.mergePage(origReport_reader.getPage(i))
                                if i == 0:
                                    pdf_page.mergePage(cover_reader.getPage(0))
                                outReport_writer.addPage(pdf_page)
                            if origPages > maxPages: # Handle the extra long reports here
                                for i in range(maxPages, origPages):
                                    outReport_writer.addPage(origReport_reader.getPage(i))
                            outReport_writer.write(output_file)
                            output_file.close()
                            origReport_file.close()
                            cover_file.close()
                # Remove the cover page, which is no longer needed.
                os.remove('cover.pdf')
                # Rename Output.pdf to the original filename.
                os.rename('Output.pdf', fn)
            else:
                print(f'The student ID {studentID} does not exist.')
    wm_file.close()
# The following function merges all PDFs in the current directory into some number of merged PDFs
# with names Merge_0.pdf, Merge_1.pdf, etc. The variable numPerFile determines roughly how many
# files are included in each merged file. This function is not currently used.
# Unfortunately, merged files confuse Gradescope's processing engine for reasons that I do not
# understand.
def MergePDFsInDirectory():
    """Merge all PDFs in the cwd into Merge_0.pdf, Merge_1.pdf, ... (currently unused).

    NOTE(review): ``cnt > numPerFile`` only rolls over after numPerFile + 1
    appends, so each batch actually holds up to 201 PDFs, and a final
    (possibly empty) Merge_N.pdf is always written — confirm before reusing.
    """
    cnt = 0
    mergeNum = 0
    numPerFile = 200
    mergeName = 'Merge_' + str(mergeNum) + '.pdf'
    merger = PdfFileMerger()
    for fn in os.listdir():
        if fn.endswith('.pdf'):
            merger.append(fn)
            cnt += 1
            if cnt > numPerFile:
                # Flush the current batch and start a new one.
                merger.write(mergeName)
                merger.close()
                merger = PdfFileMerger()
                mergeNum += 1
                mergeName = 'Merge_' + str(mergeNum) + '.pdf'
                cnt = 0
    # Flush whatever remains in the last batch.
    merger.write(mergeName)
    merger.close()


if __name__ == '__main__':
    main()
| {"/Scripts/uploadAssignmentOverrides.py": ["/mahCanvas.py"], "/Scripts/downloadStudentList.py": ["/mahCanvas.py"]} |
45,942 | MAHines/CanvasMAH | refs/heads/main | /Scripts/CombineGradescopeCSVs.py | import os
import glob
import pandas as pd
from bullet import Bullet # pip install bullet
# This script reads in all of the Gradescope data from all of the csv's in the cwd except 'allGrades.csv,'
# then combines these data into a single csv containing all of the grades, 'allGrades.csv' The script
# also calculates a total grade for each experiment that has a graded report.
def setColumnOrder(allHeaders):
    """Return headers with Last_Name, First_Name, SID first and the rest sorted.

    Duplicate headers collapse (set semantics), and the three name columns
    always appear in the result even when absent from ``allHeaders``.
    """
    pinned = ['Last_Name', 'First_Name', 'SID']
    remaining = sorted(set(allHeaders) - set(pinned))
    return pinned + remaining
def main():
    """Merge every Gradescope csv in the cwd into a single allGrades.csv.

    Interactively picks a "main" roster csv (source of First Name, Last
    Name, SID), merges all other csv's on lower-cased Email, computes an
    ``ExpN_..._Total`` column for each experiment that has Notebook, Prelab
    and Report grades, and writes the combined table to allGrades.csv.
    """
    cwd = os.getcwd()

    # Get roster from main file
    files = glob.glob("*.csv")
    cli = Bullet("Choose main file", files, margin=3)
    rosterFile = cli.launch()

    # The main file is used to read first name, last name, and SID. All files are indexed by
    # Email, as this is the only info that is constant across our Gradescope courses.
    roster = pd.read_csv(
        os.path.join(os.getcwd(), rosterFile),
        converters={"Email": str.lower},
        usecols=["First Name", "Last Name", "SID", "Email"],
        index_col="Email",
    )

    all_files = glob.glob(os.path.join(cwd, "*.csv"))
    # Exclude output from a previous run.  (Bug fix: glob.glob returns a
    # *list*, so the old test ``oldOutput in all_files`` was always False
    # and a stale allGrades.csv got merged back into the new output.)
    oldOutput = os.path.join(cwd, "allGrades.csv")
    if oldOutput in all_files:
        all_files.remove(oldOutput)

    # The Gradescope csv's have a bunch of columns that are not useful to us. We avoid loading
    # that info using the info in sub_strings and usecols.
    sub_strings = ['Submission', 'Max', 'Lateness', 'First Name', 'Last Name', 'SID', 'section_name']
    usecols = lambda x: not any(s in x for s in sub_strings)
    for filename in all_files:
        newDf = pd.read_csv(filename,
                            converters={"Email": str.lower},
                            usecols=usecols,
                            index_col='Email'
                            )
        roster = roster.combine_first(newDf)

    # Clean up the column names, replacing spaces with _ and removing :
    roster.columns = roster.columns.str.replace(' ', '_')
    roster.columns = roster.columns.str.replace(':', '')

    # Loop through the experiments. If all 3 pieces of the expt have Gradescope records, combine
    # them. Skip any students w/o a report grade, but count missing prelabs/notebooks as 0.
    # The script assumes the exp parts have names 'ExpN: blahblah_Notebook',
    # 'ExpN: blahblah_Prelab', and 'ExpN: blahblah_Report' where N is an integer.
    roster = roster[setColumnOrder(list(roster.columns))]  # Set column order
    allHeaders = list(roster.columns)
    for i in range(1, 9):  # There are only 7 labs, but this might be used elsewhere
        expPrefix = 'Exp' + str(i)
        expParts = [x for x in allHeaders if x.startswith(expPrefix)]
        if len(expParts) > 2:
            # Sorted order is Notebook < Prelab < Report; the Report column is
            # deliberately not fillna'd so report-less students stay NaN.
            expTotal = expParts[0].replace('_Notebook', '') + '_Total'
            roster[expTotal] = roster[expParts[0]].fillna(0) + roster[expParts[1]].fillna(0) + roster[expParts[2]]

    # Reorder the columns one more time because of the new lab totals.
    roster = roster[setColumnOrder(list(roster.columns))]  # Set column order
    roster.to_csv("allGrades.csv", index=True)


if __name__ == '__main__':
    main()
| {"/Scripts/uploadAssignmentOverrides.py": ["/mahCanvas.py"], "/Scripts/downloadStudentList.py": ["/mahCanvas.py"]} |
45,943 | MAHines/CanvasMAH | refs/heads/main | /mahCanvas.py | # Class for accessing Canvas programatically
# Useful resources include:
# https://canvasapi.readthedocs.io/en/stable/getting-started.html
# https://github.com/ucfopen/canvasapi
# https://canvas.instructure.com/doc/api/assignments.html
# https://github.com/dsavransky/grading
from datetime import datetime, date, timedelta, timezone
from dateutil import parser
import os
import numpy as np # Install with Anaconda
import pandas as pd # Install with Anaconda, also installs numpy I think
from canvasapi import Canvas # pip install canvasapi
import keyring # pip install keyring
from bullet import Bullet # pip install bullet
from pytz import reference
import getpass
# -----------------------------------------------------------------------------------
#
# Helper functions
#
# -----------------------------------------------------------------------------------
def utc_to_local(utc_dt):
    """Interpret ``utc_dt`` as UTC and return an aware datetime in the local zone."""
    aware_utc = utc_dt.replace(tzinfo=timezone.utc)
    return aware_utc.astimezone(tz=None)
def local_to_utc(local_dt):
    """Interpret ``local_dt`` as system-local time and return an aware UTC datetime."""
    naive_local = local_dt.replace(tzinfo=None)
    return naive_local.astimezone(tz=timezone.utc)
class mahCanvas:
# -----------------------------------------------------------------------------------
#
# Accessing Canvas
#
# -----------------------------------------------------------------------------------
def __init__(self):
    """Connect to Canvas using a token stored in the system keychain.

    On first use (no stored token) this prompts for a token — generate one
    under Account > Settings > New Access Token in Canvas; generating a new
    token invalidates the old one. A token that connects successfully is
    saved to the keychain for later runs. Sets ``self.canvas``.
    """
    # Local import: only needed for the error path below.
    # (Bug fix: InvalidAccessToken was previously referenced without being
    # imported anywhere, so a bad token raised NameError instead of
    # printing the "Could not connect" message.)
    from canvasapi.exceptions import InvalidAccessToken

    canvas_token_file = None
    canvasURL = "https://canvas.cornell.edu"
    canvasTokenName = 'Cornell_Canvas_Token'

    token = keyring.get_password(canvasTokenName, "canvas")
    if token is None:
        # No stored token: prompt (or read from a file if one is configured).
        if canvas_token_file is None:
            token = getpass.getpass("Enter canvas token:\n")
        else:
            with open(canvas_token_file, "r") as f:
                token = f.read().strip()
        try:
            canvas = Canvas(canvasURL, token)
            canvas.get_current_user()  # round-trip to validate the token
            keyring.set_password(canvasTokenName, "canvas", token)
            print("Connected. Token Saved")
        except InvalidAccessToken:
            print("Could not connect. Token not saved.")
    else:
        canvas = Canvas(canvasURL, token)
        canvas.get_current_user()
        print("Connected to Canvas.")
    self.canvas = canvas
# -----------------------------------------------------------------------------------
#
# Accessing Courses and Assignments
#
# -----------------------------------------------------------------------------------
def listCourses(self, onlyThisTerm = True):
    """Return parallel lists (names, ids) of courses the user can access.

    By default only courses whose term matches the guessed current semester
    (see currentTerm) are returned; pass ``onlyThisTerm=False`` for all.
    """
    courses = self.canvas.get_courses(include=["term"])
    # Removed an unused local (``present = datetime.now()``).
    courseStrs = []
    courseNums = []
    curTerm = self.currentTerm()
    for course in courses:
        if not onlyThisTerm or course.term['name'] == curTerm:
            courseStrs.append(str(course))
            courseNums.append(course.id)
    return courseStrs, courseNums
def listAssignments(self):
    """Return parallel lists (names, ids) of assignments in ``self.course``.

    Returns -1 (after beeping and printing a message) if no course has been
    chosen yet.
    """
    if not hasattr(self, 'course'):
        print('\a')
        print('You need to choose a course before choosing an assignment. Exiting now.')
        return -1
    assignNames = []
    assignIDs = []
    for assignment in self.course.get_assignments():
        assignNames.append(str(assignment))
        assignIDs.append(assignment.id)
    return assignNames, assignIDs
def chooseCourse(self, onlyThisTerm):
    """Interactively pick a course from Canvas; return its id."""
    names, ids = self.listCourses(onlyThisTerm)
    menu = Bullet("Choose course", names, margin=3, return_index=True)
    _, choice = menu.launch()
    return ids[choice]
def chooseAssignment(self):
    """Interactively pick an assignment from the current course; return its id."""
    names, ids = self.listAssignments()
    menu = Bullet("Choose assignment", names, margin=3, return_index=True)
    _, choice = menu.launch()
    return ids[choice]
def currentTerm(self):
    """Guess the current term ('Spring/Summer/Fall YYYY') from today's date.

    Kludge: fixed calendar boundaries — Spring ends May 30, Summer ends
    Aug 15, everything later in the year is Fall.
    """
    today = date.today()
    springEnd = date(today.year, 5, 30)
    summerEnd = date(today.year, 8, 15)
    if today < springEnd:
        season = 'Spring'
    elif today < summerEnd:
        season = 'Summer'
    else:
        season = 'Fall'
    return season + ' ' + str(today.year)
# -----------------------------------------------------------------------------------
#
# Setting Due Dates (Assignment Overrides) for Sections and/or Individual Students
#
# -----------------------------------------------------------------------------------
def downloadStudentList(self, onlyThisTerm = True):
    """Download the roster (sortable name, Canvas id) of a chosen course to a csv.

    Writes '<course_code>students.csv' in the cwd; writes nothing if the
    course has no students.
    """
    courseNum = self.chooseCourse(onlyThisTerm)
    self.course = self.canvas.get_course(courseNum)
    baseName = self.course.course_code
    enrolled = self.course.get_users(enrollment_type=['student'], include=["enrollments"])
    rows = [{'Name': student.sortable_name, 'studentID': student.id}
            for student in enrolled]
    if len(rows) > 0:
        pd.DataFrame(rows).to_csv(baseName + 'students.csv', index=False)
def downloadAssignmentOverrides(self, onlyThisTerm = True):
    """Download an assignment's "override" due dates to CSVs.

    Section overrides go to '<course_code>sectionOverrides.csv' (due dates
    stored as day offsets from the earliest section due date); student
    overrides go to '<course_code>studentOverrides.csv'. Files are written
    only when the corresponding override type exists.
    """
    course_id = self.chooseCourse(onlyThisTerm)
    self.course = self.canvas.get_course(course_id)
    assignment = self.course.get_assignment(self.chooseAssignment())
    overrides = assignment.get_overrides()
    prefix = self.course.course_code

    def local_due(ovr):
        # Canvas reports due_at as a UTC timestamp string; convert to local.
        return utc_to_local(datetime.strptime(ovr.due_at, "%Y-%m-%dT%H:%M:%SZ"))

    # First pass: earliest section due date (baseline for delta_date).
    earliest = date(2099, 1, 1)
    for ovr in overrides:
        if hasattr(ovr, 'course_section_id'):
            earliest = min(earliest, local_due(ovr).date())

    student_rows = []
    section_rows = []
    for ovr in overrides:
        if hasattr(ovr, 'student_ids'):  # individual-student override
            due = local_due(ovr)
            for sid in ovr.student_ids:
                student = self.course.get_user(sid)
                student_rows.append({'Name': student.sortable_name,
                                     'studentID': sid,
                                     'due_date': due.strftime('%m/%d/%Y'),
                                     'due_time': due.strftime('%H:%M')})
        elif hasattr(ovr, 'course_section_id'):  # section override
            due = local_due(ovr)
            section_rows.append({'Section': ovr.title,
                                 'course_section_id': ovr.course_section_id,
                                 'delta_date': (due.date() - earliest).days,
                                 'due_time': due.strftime('%H:%M')})
    if len(section_rows) > 0:
        pd.DataFrame(section_rows).to_csv(prefix + 'sectionOverrides.csv', index=False)
    if len(student_rows) > 0:
        pd.DataFrame(student_rows).to_csv(prefix + 'studentOverrides.csv', index=False)
def uploadAssignmentOverrides(self, earliestDate, overwrite = False, onlyThisTerm = True):
    """Upload "override" due dates for an assignment from one or two CSVs.

    earliestDate: date string for the earliest section due date; section CSV
        rows store day offsets (delta_date) relative to it.
    overwrite: if True, delete all existing overrides first.
    Returns -1 on any validation or API failure; otherwise None.
    """
    firstDay = parser.parse(earliestDate)
    # Use interactive lists to get course and assignment
    courseNum = self.chooseCourse(onlyThisTerm)
    self.course = self.canvas.get_course(courseNum)
    assignmentNum = self.chooseAssignment()
    assignment = self.course.get_assignment(assignmentNum)
    baseName = self.course.course_code
    # If overwriting, erase existing overrides
    if overwrite:
        for override in assignment.get_overrides():
            override.delete()
    # Process student overrides
    fileName = baseName + 'studentOverrides.csv'
    if os.path.isfile(fileName):
        # Group students by due date/time so each distinct deadline becomes a
        # single Canvas override carrying all of its student IDs.
        print(f'Processing student overrides from {fileName}.')
        studentOverrides = {}
        df = pd.read_csv(fileName)
        for row in df.itertuples(index=True, name='Pandas'):
            print(row.Name, row.studentID, row.due_date, row.due_time)
            # Make sure student number and name match
            try:
                student = self.course.get_user(row.studentID)
            except Exception:  # was a bare except; don't swallow KeyboardInterrupt/SystemExit
                print('\a')
                print(f'Error in file {fileName}! Student with the id {row.studentID} is not enrolled in course.')
                print('Exiting now. You need to fix the file before proceeding.')
                return -1
            if student.sortable_name != row.Name:
                print('\a')
                print(f'Error in file {fileName}! {row.Name} does not match {student.sortable_name}.')
                print('Check that the ID number matches the name.')
                return -1
            # Use the due date and time as the key to the dictionary
            d = parser.parse(row.due_date + ' ' + row.due_time)
            studentOverrides.setdefault(d, []).append(row.studentID)
        # Now create all of the student exceptions and upload
        for due_at, student_ids in studentOverrides.items():
            override = {'student_ids': student_ids,
                        'due_at': due_at}
            try:
                assignment.create_override(assignment_override=override)
            except Exception:
                print('\a')
                # Typo fixed: "Overwriteing" -> "Overwriting"
                print("Request failed: Overwriting an existing student override? Use overwrite = True.")
                return -1
    # Process section overrides
    fileName = baseName + 'sectionOverrides.csv'
    if os.path.isfile(fileName):
        print(f'Processing section overrides from {fileName}.')
        df = pd.read_csv(fileName)
        # Get the lab sections for use in validation
        labSections = {}
        for section in self.course.get_sections():
            if section.name.startswith('LAB'):
                labSections[section.id] = section.name
        for row in df.itertuples(index=True, name='Pandas'):
            print(row.Section, row.course_section_id, row.delta_date, row.due_time)
            # Make sure section number and name match
            if row.course_section_id not in labSections:
                print('\a')
                print(f'Error in file {fileName}! No section with ID {row.course_section_id} in course.')
                print('Exiting now. You need to fix the file before proceeding.')
                return -1
            if labSections[row.course_section_id] != row.Section:
                print('\a')
                print(f'Error in file {fileName}! Section with the id {row.course_section_id} is not named {row.Section}.')
                print('Exiting now. You need to fix the file before proceeding.')
                return -1
            dueDate = (firstDay + timedelta(days=row.delta_date)).date().strftime('%m/%d/%Y')
            d = parser.parse(dueDate + ' ' + row.due_time)
            override = {'course_section_id': row.course_section_id,
                        'due_at': d}
            try:
                assignment.create_override(assignment_override=override)
            except Exception:
                print('\a')
                print("Request failed: Overwriting an existing section override? Use overwrite = True.")
                return -1
# --------------------------------- Code below this line is not currently used -------------------------------------------- #
def loadCourseAndLabs(self, courseNum):
    """Load a course roster plus each student's lab and discussion section.

    Populates self.names / IDs / netIDs / labs / discs (parallel arrays, one
    entry per student) and the section name<->ID lookup dictionaries.
    """
    assert isinstance(courseNum, int), "courseNum must be an int"
    course = self.canvas.get_course(courseNum)
    # Section lookup tables, keyed both ways (name -> id and id -> name).
    labIDs = {}
    labSections = {}
    discIDs = {}
    discSections = {}
    for section in course.get_sections():
        if section.name.startswith('LAB'):
            labIDs[section.name] = section.id
            labSections[section.id] = section.name
        elif section.name.startswith('DIS'):
            discIDs[section.name] = section.id
            discSections[section.id] = section.name
    print(f'Found {len(labSections)} lab sections.')
    # Sentinel entries for students with no lab/discussion section.
    labIDs['None'] = -1
    labSections[-1] = 'None'
    discIDs['None'] = -1
    discSections[-1] = 'None'
    students = course.get_users(enrollment_type=['student'], include=["enrollments", "test_student"])
    names = []
    IDs = []
    netIDs = []
    labs = []
    discs = []
    for student in students:
        names.append(student.sortable_name)
        IDs.append(student.id)
        netIDs.append(student.login_id)
        # Each student is enrolled in a lecture, a discussion, and at least
        # one lab; an in-person lab supersedes the online lab section.
        lab_id = -1
        disc_id = -1
        for enrollment in student.enrollments:
            section_id = enrollment.get('course_section_id')
            if section_id in labSections:
                if lab_id > 0:
                    # NOTE(review): nesting reconstructed from flattened source;
                    # 34798 appears to be the online lab's hard-coded section ID,
                    # replaced when an in-person lab is also found — confirm.
                    if lab_id == 34798:  # Online lab
                        lab_id = section_id
                else:
                    lab_id = section_id
            elif section_id in discSections:
                disc_id = section_id
        labs.append(lab_id)
        discs.append(disc_id)
    # One entry for every student
    self.course = course
    self.names = np.array(names)
    self.IDs = np.array(IDs)
    self.netIDs = np.array(netIDs)
    self.labs = np.array(labs)
    self.discs = np.array(discs)
    # Used to translate between Canvas names and IDs
    self.labIDs = labIDs
    self.labSections = labSections
    self.discIDs = discIDs
    self.discSections = discSections
    self.courseName = course.name
    self.courseNum = courseNum
def outputSpreadsheet(self, courseNum):
    """Dump the course roster with lab/discussion sections to 'course.csv'."""
    self.loadCourseAndLabs(courseNum)
    roster = pd.DataFrame({"Name": self.names, "ID": self.IDs,
                           'netID': self.netIDs, 'Lab': self.labs,
                           'Disc': self.discs})
    roster.to_csv("course.csv", index=False)
def loadCourse(self, courseNum):
    """Load student names, Canvas IDs, and netIDs for a course (no sections)."""
    assert isinstance(courseNum, int), "courseNum must be an int"
    # get the course
    course = self.canvas.get_course(courseNum)
    users = course.get_users(include=["enrollments", "test_student"])
    theNames = []
    IDs = []
    netIDs = []
    for user in users:
        # A user counts as a student when an enrollment in THIS course has the
        # StudentEnrollment role (as before, the last matching enrollment wins).
        isstudent = False
        for enrollment in user.enrollments:
            if enrollment["course_id"] == courseNum:
                isstudent = enrollment["role"] == "StudentEnrollment"
        if isstudent:
            theNames.append(user.sortable_name)
            IDs.append(user.id)
            netIDs.append(user.login_id)
    self.course = course
    self.theNames = np.array(theNames)
    self.IDs = np.array(IDs)
    self.netIDs = np.array(netIDs)
    self.coursename = course.name
    # NOTE(review): this sets self.theNames, but studentName() reads self.names
    # (set by loadCourseAndLabs) — confirm which attribute is intended.
def studentName(self, studentID):
    """Return the sortable name for a Canvas student ID, or None if unknown."""
    assert isinstance(studentID, int), "studentID must be an int"
    matches = np.where(self.IDs == studentID)[0]
    return None if matches.size == 0 else self.names[matches[0]]
| {"/Scripts/uploadAssignmentOverrides.py": ["/mahCanvas.py"], "/Scripts/downloadStudentList.py": ["/mahCanvas.py"]} |
45,945 | JaimeVRodriguez/Coffee_Chatbot | refs/heads/master | /functions.py | def get_size():
res = input('What size drink can I get for you?\n[a] Small \n[b] Medium \n[c] Large \n> ')
if res == 'a':
return 'Small'
elif res == 'b':
return 'Medium'
elif res == 'c':
return 'Large'
else:
print_message()
return get_size()
def get_drink_type():
    """Prompt for a drink type and return the finished drink name.

    Re-prompts (recursively) on invalid input.
    """
    handlers = {'a': order_brew, 'b': order_mocha, 'c': order_latte}
    choice = input('What type of drink would you like?\n[a] Brewed Coffee \n[b] Mocha \n[c] Latte \n> ')
    if choice in handlers:
        return handlers[choice]()
    print_message()
    return get_drink_type()
def get_cup_type():
    """Prompt for a cup type until a valid choice is made.

    Returns 'Plastic Cup' or 'Reusable Cup'.
    """
    res = input('What type of cup would you like?\n[a] Plastic Cup \n[b] Reusable Cup \n> ')
    if res == 'a':
        return 'Plastic Cup'
    elif res == 'b':
        return 'Reusable Cup'
    else:
        print_message()
        # Bug fix: the retry previously called undefined `cup_type()`,
        # which raised NameError on any invalid input.
        return get_cup_type()
def order_latte():
    """Ask which milk to use and return the corresponding latte name."""
    milk_options = {'a': 'Latte', 'b': 'Non-fat Latte', 'c': 'Soy Latte'}
    while True:
        choice = input('And what kind of milk for your latte?\n[a] 2% milk \n[b] Non-fat milk \n[c] Soy milk \n> ')
        if choice in milk_options:
            return milk_options[choice]
        print_message()
def order_brew():
    """Ask hot or iced and return the corresponding brewed-coffee name."""
    temperatures = {'a': 'Hot Brewed Coffee', 'b': 'Iced Brewed Coffee'}
    while True:
        choice = input('And what kind of brewed coffee?\n[a] Hot \n[b] Iced \n> ')
        if choice in temperatures:
            return temperatures[choice]
        print_message()
def order_mocha():
    """Offer the limited-edition peppermint mocha; return the chosen mocha."""
    answers = {'a': 'Peppermint Mocha', 'b': 'Mocha'}
    while True:
        choice = input('Would you like to try our limited-edition peppermint mocha?\n[a] Sure! \n[b] Maybe next time! \n> ')
        if choice in answers:
            return answers[choice]
        print_message()
def print_message():
    """Print the standard invalid-selection help message.

    Typo fixed: "cooresponding" -> "corresponding".
    """
    print("I'm sorry, I did not understand your selection. Please enter the corresponding letter for your response.")
| {"/coffee_chatbot.py": ["/functions.py"]} |
45,946 | JaimeVRodriguez/Coffee_Chatbot | refs/heads/master | /coffee_chatbot.py | from functions import get_size, get_drink_type, get_cup_type, order_latte, order_brew, print_message
def coffee_bot():
    """Run the interactive ordering conversation from greeting to thank-you."""
    print('\n\n\n')
    print('\t\t\t****************************\t\t\t')
    print('\t\t\t** Welcome to the cafe! **')
    print('\t\t\t****************************\t\t\t')
    print('\n\n\n')
    drinks = []
    order_drink = 'y'
    while order_drink == 'y':
        size = get_size()
        drink_type = get_drink_type()
        cup_type = get_cup_type()
        drink = '{} {}'.format(size, drink_type)
        print("Alright, that's a {} in a {}!".format(drink, cup_type))
        drinks.append(drink)
        # Re-prompt until the answer is exactly 'y' or 'n'.
        while True:
            order_drink = input('Would you like to order another drink?\n[y] \n[n] \n> ')
            if order_drink in ('y', 'n'):
                break
    print('Okay, so I have:')
    for drink in drinks:
        print('- {}'.format(drink))
    name = input('Can I get your name please? \n> ')
    print('Thanks, {}! Your drink will be ready shortly.'.format(name))
# Run the chatbot only when executed as a script, not when imported.
if __name__ == '__main__':
    coffee_bot()
| {"/coffee_chatbot.py": ["/functions.py"]} |
45,954 | rgienko/appealsMasterv2 | refs/heads/master | /app/migrations/0006_auto_20200414_1618.py | # Generated by Django 2.1.15 on 2020-04-14 21:18
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: set case_number to CharField(max_length=7)."""

    dependencies = [
        ('app', '0005_auto_20200413_2217'),
    ]

    operations = [
        migrations.AlterField(
            model_name='critical_dates_master',
            name='case_number',
            field=models.CharField(max_length=7),
        ),
    ]
| {"/app/forms.py": ["/app/models.py"], "/app/admin.py": ["/app/models.py"], "/app/views.py": ["/app/forms.py", "/app/models.py"], "/app/urls.py": ["/app/views.py"]} |
45,955 | rgienko/appealsMasterv2 | refs/heads/master | /app/forms.py | from django import forms
from django.utils.translation import ugettext_lazy as _
from django.forms import ModelForm, Textarea, DateField, CheckboxInput, TextInput
from .models import appeal_master, critical_dates_master, provider_master, issue_master, parent_master, file_storage
from tinymce.widgets import TinyMCE
from django.db.models import Avg, Sum
from datetime import datetime
class make_dir_form(forms.Form):
    """Inputs needed to create an appeal directory (individual or group)."""
    types = [
        ('INDIVIDUAL', 'Individual'),
        ('GROUP', 'Group')
    ]
    # Field name 'type' shadows the builtin, but it is part of the form's
    # public interface (HTML field name), so it is preserved.
    type = forms.ChoiceField(choices=types)
    parent = forms.ModelChoiceField(queryset=parent_master.objects.only('parent_id'))
    p_num = forms.CharField(max_length=7, required=False)
    issue = forms.ModelChoiceField(queryset=issue_master.objects.only('abbreviation'), required=False)
    fy = forms.IntegerField()
    c_num = forms.CharField(max_length=7)
class group_form_form(forms.Form):
    """Selectors for building a group filing: fiscal year, parent, and issue."""
    fy = forms.ModelChoiceField(queryset=provider_master.objects.only('fiscal_year'))
    parent = forms.ModelChoiceField(queryset=parent_master.objects.only('parent_id'))
    issue = forms.ModelChoiceField(queryset=issue_master.objects.only('issue_id'))
class CalendarEventForm(forms.Form):
    """Fields describing a calendar event (subject, body, times, location)."""
    subject = forms.CharField()
    content = forms.CharField()
    start = forms.DateTimeField()
    end = forms.DateTimeField()
    location = forms.CharField()
    # Unchecked checkboxes post nothing, so this must not be required.
    is_all_day = forms.BooleanField(required = False)
class add_issue(ModelForm):
    """ModelForm for attaching an issue record to a provider/fiscal year.

    Label typo fixed: 'Reciept Date:' -> 'Receipt Date:'.
    """
    class Meta:
        model = provider_master
        fields = [
            'provider_number',
            'fiscal_year',
            'npr_date',
            'receipt_date',
            'was_added',
            'issue_id',
            'audit_adjustments',
            'charge_id',
            'amount',
            'sri_staff_id',
            'active_in_appeal_field',
            'provider_specific_note'
        ]
        labels = {
            'provider_number': _('Provider Number:'),
            'fiscal_year': _('Fiscal Year:'),
            'npr_date': _('NPR Date:'),
            'receipt_date': _('Receipt Date:'),
            'was_added': _('Was Added:'),
            'issue_id': _('Issue:'),
            'audit_adjustments': _('Audit Adjustments:'),
            'charge_id': _('Code:'),
            'amount': _('Amount:'),
            'sri_staff_id': _('SRG Staff:'),
            'active_in_appeal_field': _('Active:'),
            'provider_specific_note': _('Provider Specific Note:')
        }
        widgets = {
            'was_added': CheckboxInput(attrs={'class': 'checkbox'}),
            'active_in_appeal_field': CheckboxInput(attrs={'class': 'checkbox'}),
            'provider_specific_note': Textarea(attrs={'cols': 75, 'rows': 5})
        }
class new_appeal_master_form(ModelForm):
    """ModelForm for creating a new appeal_master record."""
    class Meta:
        model = appeal_master
        fields = ['case_number', 'rep_id', 'fi_id', 'prrb_contact_id',
                  'status_id', 'appeal_name', 'structure', 'is_ffy']
        labels = {
            'case_number': _('Case Number:'),
            'rep_id': _('SRG Representative:'),
            'fi_id': _('Intermediary (MAC):'),
            'prrb_contact_id': _('PRRB Representative'),
            'status_id': _('Appeal Status:'),
            'appeal_name': _('Appeal Name:'),
            'structure': _('Structure:'),
            'is_ffy': _('FFY?')
        }
        widgets = {
            'is_ffy': CheckboxInput(attrs={'class': 'checkbox'}),
            'appeal_name': TextInput(attrs={'size': '75'}),
        }
class new_issue_master_form(ModelForm):
    """ModelForm for creating a new issue_master record."""
    class Meta:
        model = issue_master
        fields = ['issue_id', 'issue', 'rep_id', 'abbreviation',
                  'is_groupable', 'short_description', 'long_description']
        # NOTE(review): 'issue' and 'is_groupable' have no label entries, so
        # Django falls back to auto-generated labels — confirm intentional.
        labels = {
            'issue_id': _('Issue ID:'),
            'rep_id': _('SRG Representative:'),
            'abbreviation': _('Issue abbreviation:'),
            'short_description': _('Short Description'),
            'long_description': _('Long Description')
        }
        widgets = {
            'issue': TextInput(attrs={'size': '75'}),
            'is_groupable': CheckboxInput(attrs={'class': 'checkbox'}),
            'short_description': Textarea(attrs={'cols': 85, 'rows': 5}),
            'long_description': Textarea(attrs={'cols': 85, 'rows': 10})
        }
class acknowledge_case_form(forms.Form):
    """Single-date form recording when a case was acknowledged."""
    acknowledged_date = forms.DateField()

    def clean_acknowledged_date(self):
        # Pass-through clean; hook point for future validation.
        return self.cleaned_data['acknowledged_date']
class add_parent_form(ModelForm):
    """ModelForm for adding a parent organization and its corporate contact."""
    class Meta:
        model = parent_master
        fields = ['parent_id', 'parent_full_name', 'corp_contact_first_name',
                  'corp_contact_last_name', 'corp_contact_street',
                  'corp_contact_city', 'corp_contact_state_id',
                  'corp_contact_zip', 'corp_contact_phone', 'corp_contact_email']
        labels = {
            'parent_id': _('Parent ID:'),
            'parent_full_name': _('Parent Full Name:'),
            'corp_contact_first_name': _('First Name:'),
            'corp_contact_last_name': _('Last Name:'),
            'corp_contact_street': _('Street:'),
            'corp_contact_city': _('City:'),
            'corp_contact_state_id': _('State:'),
            'corp_contact_zip': _('Zip Code:'),
            'corp_contact_phone': _('Phone'),
            'corp_contact_email': _('Email')
        }
class transfer_issue_form(forms.Form):
    """Destination case and date for transferring an issue to another case."""
    to_case = forms.ModelChoiceField(queryset=appeal_master.objects.only('case_number'))
    to_date = forms.DateField()

    def clean_to_date(self):
        # Pass-through clean; hook point for future validation.
        return self.cleaned_data['to_date']
class add_critical_due_dates_form(ModelForm):
    """ModelForm for adding a critical date and its action to a case."""
    class Meta:
        model = critical_dates_master
        fields = ['critical_date', 'action_id']
class upload_case_file(ModelForm):
    """ModelForm for uploading a typed file attachment for a case."""
    class Meta:
        model = file_storage
        fields = ['file_type', 'file']
        labels = {
            'file_type': _('File Type:'),
            'file': _('File:')
        }
| {"/app/forms.py": ["/app/models.py"], "/app/admin.py": ["/app/models.py"], "/app/views.py": ["/app/forms.py", "/app/models.py"], "/app/urls.py": ["/app/views.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.