| index (int64) | repo_name (string) | branch_name (string) | path (string) | content (string) | import_graph (string) |
|---|---|---|---|---|---|
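The import_graph column holds a JSON object mapping each file in the repository to the list of files it imports. A minimal sketch for reading one of those cells back into a usable mapping (the cell value is shortened here; variable names are illustrative):

```python
import json

# Shortened example of an import_graph cell; real cells list every tracked file in the repository.
cell = '{"/run.py": ["/models.py", "/resources.py"], "/resources/CheckService.py": ["/validation.py"]}'
graph = json.loads(cell)
for source, imports in graph.items():
    print(f"{source} imports {', '.join(imports)}")
```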
23,326,668
|
kush-singh-chb/FamilyCart
|
refs/heads/main
|
/resources/UnderConstruction.py
|
from flask import make_response, render_template
from flask_restful import Resource
class UnderConstruction(Resource):
    def get(self):
        return make_response(render_template('under_construction.html'))
|
{"/resources.py": ["/validation.py", "/models.py"], "/run.py": ["/models.py", "/resources.py", "/models/RevokedTokenModel.py", "/resources/UnderConstruction.py", "/resources/CheckService.py", "/resources/EthnicCategory.py", "/resources/MainRoute.py", "/resources/UserLogin.py", "/resources/UserLogoutAccess.py", "/resources/UserLogoutRefresh.py", "/resources/UserRegistration.py"], "/resources/EthnicCategory.py": ["/validation.py", "/models/EthinicCategoryModel.py"], "/resources/UserLogoutAccess.py": ["/models/RevokedTokenModel.py"], "/resources/UserLogin.py": ["/validation.py", "/models/UserModel.py"], "/resources/UserLogoutRefresh.py": ["/models/RevokedTokenModel.py"], "/resources/CheckService.py": ["/validation.py"], "/resources/UserRegistration.py": ["/validation.py", "/models/UserModel.py"]}
|
23,326,669
|
kush-singh-chb/FamilyCart
|
refs/heads/main
|
/validation.py
|
import re
from flask_restful import reqparse
def register_validate():
    parser_register = reqparse.RequestParser()
    parser_register.add_argument('email', help='This field cannot be blank', required=True)
    parser_register.add_argument('first_name', help='This field cannot be blank', required=True)
    parser_register.add_argument('last_name', help='This field cannot be blank', required=True)
    parser_register.add_argument('eir_code', help='This field cannot be blank', required=True)
    parser_register.add_argument('password', help='This field cannot be blank', required=True)
    return parser_register
def login_validate():
    parser_register = reqparse.RequestParser()
    parser_register.add_argument('username', help='This field cannot be blank', required=True)
    parser_register.add_argument('password', help='This field cannot be blank', required=True)
    return parser_register
def check_service_validate():
    parser_register = reqparse.RequestParser()
    parser_register.add_argument('eircode', help='This field cannot be blank', required=True)
    return parser_register
def validate_email(email) -> bool:
    pattern = re.compile(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)")
    return bool(pattern.match(email))
def validate_eir(code) -> bool:
    eir_code_pattern = re.compile(r"(?:^[AC-FHKNPRTV-Y][0-9]{2}|D6W)[ -]?[0-9AC-FHKNPRTV-Y]{4}$")
    return bool(eir_code_pattern.match(code))
def category_create_validate():
    parser_register = reqparse.RequestParser()
    parser_register.add_argument('name', help='This field cannot be blank', required=True)
    return parser_register
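# Illustrative sanity check of the two validators above (sample values, not from the repository):
#   validate_email("jane.doe@example.com")  -> True
#   validate_email("not-an-email")          -> False
#   validate_eir("A65 F4E2")                -> True   (routing key + unique identifier)
#   validate_eir("123 4567")                -> False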
|
{"/resources.py": ["/validation.py", "/models.py"], "/run.py": ["/models.py", "/resources.py", "/models/RevokedTokenModel.py", "/resources/UnderConstruction.py", "/resources/CheckService.py", "/resources/EthnicCategory.py", "/resources/MainRoute.py", "/resources/UserLogin.py", "/resources/UserLogoutAccess.py", "/resources/UserLogoutRefresh.py", "/resources/UserRegistration.py"], "/resources/EthnicCategory.py": ["/validation.py", "/models/EthinicCategoryModel.py"], "/resources/UserLogoutAccess.py": ["/models/RevokedTokenModel.py"], "/resources/UserLogin.py": ["/validation.py", "/models/UserModel.py"], "/resources/UserLogoutRefresh.py": ["/models/RevokedTokenModel.py"], "/resources/CheckService.py": ["/validation.py"], "/resources/UserRegistration.py": ["/validation.py", "/models/UserModel.py"]}
|
23,326,670
|
kush-singh-chb/FamilyCart
|
refs/heads/main
|
/models/EthinicCategoryModel.py
|
import os
from datetime import datetime
from uuid import uuid4
import boto3
db = boto3.resource(service_name='dynamodb', region_name='eu-west-1',
aws_access_key_id=os.environ.get("aws_access_key_id"),
aws_secret_access_key=os.environ.get("aws_secret_access_key"))
class CategoryModel:
    __tablename__ = "ethnicCategory"
    id = str(uuid4())
    name: str
    stores = []
    createdOn: str
    updatedOn = datetime.now().isoformat()
    def save_to_db(self):
        db.Table(CategoryModel.__tablename__).put_item(Item={
            'id': self.id,
            'name': self.name,
            'stores': self.stores,
            'createdOn': self.createdOn,
            'updatedOn': self.updatedOn
        })
    @staticmethod
    def get_ethnic_category_by_id(_id):
        res = db.Table(CategoryModel.__tablename__).get_item(Key={
            'id': _id
        })
        return res['Item']
    @staticmethod
    def get_categories():
        res = db.Table(CategoryModel.__tablename__).scan()
        print(res)
        return res['Items']
|
{"/resources.py": ["/validation.py", "/models.py"], "/run.py": ["/models.py", "/resources.py", "/models/RevokedTokenModel.py", "/resources/UnderConstruction.py", "/resources/CheckService.py", "/resources/EthnicCategory.py", "/resources/MainRoute.py", "/resources/UserLogin.py", "/resources/UserLogoutAccess.py", "/resources/UserLogoutRefresh.py", "/resources/UserRegistration.py"], "/resources/EthnicCategory.py": ["/validation.py", "/models/EthinicCategoryModel.py"], "/resources/UserLogoutAccess.py": ["/models/RevokedTokenModel.py"], "/resources/UserLogin.py": ["/validation.py", "/models/UserModel.py"], "/resources/UserLogoutRefresh.py": ["/models/RevokedTokenModel.py"], "/resources/CheckService.py": ["/validation.py"], "/resources/UserRegistration.py": ["/validation.py", "/models/UserModel.py"]}
|
23,326,671
|
kush-singh-chb/FamilyCart
|
refs/heads/main
|
/resources/UserLogoutRefresh.py
|
from flask_jwt_extended import (jwt_required, jwt_refresh_token_required,
get_raw_jwt)
from flask_restful import Resource
from models.RevokedTokenModel import RevokedTokenModel
class UserLogoutAccess(Resource):
    @jwt_required
    def post(self):
        jti = get_raw_jwt()['jti']
        try:
            revoked_token = RevokedTokenModel()
            revoked_token.jti = jti
            revoked_token.add()
            return {'message': 'Access token has been revoked'}
        except:
            return {'message': 'Something went wrong'}, 500
class UserLogoutRefresh(Resource):
    @jwt_refresh_token_required
    def post(self):
        jti = get_raw_jwt()['jti']
        try:
            revoked_token = RevokedTokenModel()
            revoked_token.jti = jti
            revoked_token.add()
            return {'message': 'Refresh token has been revoked'}
        except:
            return {'message': 'Something went wrong'}, 500
|
{"/resources.py": ["/validation.py", "/models.py"], "/run.py": ["/models.py", "/resources.py", "/models/RevokedTokenModel.py", "/resources/UnderConstruction.py", "/resources/CheckService.py", "/resources/EthnicCategory.py", "/resources/MainRoute.py", "/resources/UserLogin.py", "/resources/UserLogoutAccess.py", "/resources/UserLogoutRefresh.py", "/resources/UserRegistration.py"], "/resources/EthnicCategory.py": ["/validation.py", "/models/EthinicCategoryModel.py"], "/resources/UserLogoutAccess.py": ["/models/RevokedTokenModel.py"], "/resources/UserLogin.py": ["/validation.py", "/models/UserModel.py"], "/resources/UserLogoutRefresh.py": ["/models/RevokedTokenModel.py"], "/resources/CheckService.py": ["/validation.py"], "/resources/UserRegistration.py": ["/validation.py", "/models/UserModel.py"]}
|
23,326,672
|
kush-singh-chb/FamilyCart
|
refs/heads/main
|
/run.py
|
import os
from flask import Flask
from flask_jwt_extended import JWTManager
from flask_restful import Api
from models.RevokedTokenModel import RevokedTokenModel
from resources.UnderConstruction import UnderConstruction
from resources.CheckService import TokenRefresh, CheckService
from resources.EthnicCategory import EthnicCategory, EthnicCategoryByID
from resources.MainRoute import MainRoute
from resources.UserLogin import UserLogin
from resources.UserLogoutAccess import UserLogoutAccess
from resources.UserLogoutRefresh import UserLogoutRefresh
from resources.UserRegistration import UserRegistration
application = app = Flask(__name__)
api = Api(app)
app.config['SECRET_KEY'] = os.environ.get('secret-key')
# @app.before_first_request
# def create_tables():
# UserModel.create_user_table()
# RevokedTokenModel.create_revoke_table()
app.config['JWT_SECRET_KEY'] = 'jwt-secret-string'
jwt = JWTManager(app)
app.config['JWT_BLACKLIST_ENABLED'] = True
app.config['JWT_BLACKLIST_TOKEN_CHECKS'] = ['access', 'refresh']
app.config['PROPAGATE_EXCEPTIONS'] = True
@jwt.token_in_blacklist_loader
def check_if_token_in_blacklist(decrypted_token):
    jti = decrypted_token['jti']
    return RevokedTokenModel.is_jti_blacklisted(jti)
api.add_resource(UnderConstruction, '/')
api.add_resource(MainRoute, '/swagger')
api.add_resource(UserLogin, '/login')
api.add_resource(UserRegistration, '/register')
api.add_resource(UserLogoutAccess, '/logout/access')
api.add_resource(UserLogoutRefresh, '/logout/refresh')
api.add_resource(TokenRefresh, '/token/refresh')
api.add_resource(CheckService, '/check_service')
api.add_resource(EthnicCategory, '/ethnicCategory')
api.add_resource(EthnicCategoryByID, '/ethnicCategory/<string:_id>')
if __name__ == '__main__':
    app.run(host="0.0.0.0", port=80, debug=False)
|
{"/resources.py": ["/validation.py", "/models.py"], "/run.py": ["/models.py", "/resources.py", "/models/RevokedTokenModel.py", "/resources/UnderConstruction.py", "/resources/CheckService.py", "/resources/EthnicCategory.py", "/resources/MainRoute.py", "/resources/UserLogin.py", "/resources/UserLogoutAccess.py", "/resources/UserLogoutRefresh.py", "/resources/UserRegistration.py"], "/resources/EthnicCategory.py": ["/validation.py", "/models/EthinicCategoryModel.py"], "/resources/UserLogoutAccess.py": ["/models/RevokedTokenModel.py"], "/resources/UserLogin.py": ["/validation.py", "/models/UserModel.py"], "/resources/UserLogoutRefresh.py": ["/models/RevokedTokenModel.py"], "/resources/CheckService.py": ["/validation.py"], "/resources/UserRegistration.py": ["/validation.py", "/models/UserModel.py"]}
|
23,326,673
|
kush-singh-chb/FamilyCart
|
refs/heads/main
|
/models/RevokedTokenModel.py
|
import os
from uuid import uuid4
import boto3
db = boto3.resource(service_name='dynamodb', region_name='eu-west-1',
aws_access_key_id=os.environ.get("aws_access_key_id"),
aws_secret_access_key=os.environ.get("aws_secret_access_key"))
class RevokedTokenModel:
    __tablename__ = 'revoked_tokens'
    id = str(uuid4())  # stored as a string; DynamoDB cannot serialize UUID objects
    jti = None
    @staticmethod
    def create_revoke_table():
        try:
            table = db.create_table(
                TableName=RevokedTokenModel.__tablename__,
                KeySchema=[
                    {
                        'AttributeName': 'jwt',
                        'KeyType': 'HASH'
                    }
                ],
                AttributeDefinitions=[
                    {
                        'AttributeName': 'jwt',
                        'AttributeType': 'S'
                    }
                ],
                ProvisionedThroughput={
                    'ReadCapacityUnits': 5,
                    'WriteCapacityUnits': 5
                }
            )
            # Wait until the table exists.
            table.meta.client.get_waiter('table_exists').wait(TableName=RevokedTokenModel.__tablename__)
        except Exception as e:
            print(e)
            pass
    def add(self):
        db.Table(RevokedTokenModel.__tablename__).put_item(Item={
            'jwt': self.jti,
            'id': self.id
        })
    @classmethod
    def is_jti_blacklisted(cls, jti):
        response = db.Table(RevokedTokenModel.__tablename__).get_item(Key={
            'jwt': jti,
        })
        # get_item returns an 'Item' key only when the jti was stored; `len(response) < 0` was always False.
        return 'Item' in response
|
{"/resources.py": ["/validation.py", "/models.py"], "/run.py": ["/models.py", "/resources.py", "/models/RevokedTokenModel.py", "/resources/UnderConstruction.py", "/resources/CheckService.py", "/resources/EthnicCategory.py", "/resources/MainRoute.py", "/resources/UserLogin.py", "/resources/UserLogoutAccess.py", "/resources/UserLogoutRefresh.py", "/resources/UserRegistration.py"], "/resources/EthnicCategory.py": ["/validation.py", "/models/EthinicCategoryModel.py"], "/resources/UserLogoutAccess.py": ["/models/RevokedTokenModel.py"], "/resources/UserLogin.py": ["/validation.py", "/models/UserModel.py"], "/resources/UserLogoutRefresh.py": ["/models/RevokedTokenModel.py"], "/resources/CheckService.py": ["/validation.py"], "/resources/UserRegistration.py": ["/validation.py", "/models/UserModel.py"]}
|
23,326,674
|
kush-singh-chb/FamilyCart
|
refs/heads/main
|
/models/UserCart.py
|
import os
import boto3
db = boto3.resource(service_name='dynamodb', region_name='eu-west-1',
aws_access_key_id=os.environ.get("aws_access_key_id"),
aws_secret_access_key=os.environ.get("aws_secret_access_key"))
class UserCart:
    __tablename__ = 'userCart'
    id: str
    products = []
    total = 0.0
|
{"/resources.py": ["/validation.py", "/models.py"], "/run.py": ["/models.py", "/resources.py", "/models/RevokedTokenModel.py", "/resources/UnderConstruction.py", "/resources/CheckService.py", "/resources/EthnicCategory.py", "/resources/MainRoute.py", "/resources/UserLogin.py", "/resources/UserLogoutAccess.py", "/resources/UserLogoutRefresh.py", "/resources/UserRegistration.py"], "/resources/EthnicCategory.py": ["/validation.py", "/models/EthinicCategoryModel.py"], "/resources/UserLogoutAccess.py": ["/models/RevokedTokenModel.py"], "/resources/UserLogin.py": ["/validation.py", "/models/UserModel.py"], "/resources/UserLogoutRefresh.py": ["/models/RevokedTokenModel.py"], "/resources/CheckService.py": ["/validation.py"], "/resources/UserRegistration.py": ["/validation.py", "/models/UserModel.py"]}
|
23,326,675
|
kush-singh-chb/FamilyCart
|
refs/heads/main
|
/models/Store.py
|
import os
from uuid import uuid4
import boto3
db = boto3.resource(service_name='dynamodb', region_name='eu-west-1',
aws_access_key_id=os.environ.get("aws_access_key_id"),
aws_secret_access_key=os.environ.get("aws_secret_access_key"))
class Store:
    id: str = uuid4()
    name: str
    eircode: str
|
{"/resources.py": ["/validation.py", "/models.py"], "/run.py": ["/models.py", "/resources.py", "/models/RevokedTokenModel.py", "/resources/UnderConstruction.py", "/resources/CheckService.py", "/resources/EthnicCategory.py", "/resources/MainRoute.py", "/resources/UserLogin.py", "/resources/UserLogoutAccess.py", "/resources/UserLogoutRefresh.py", "/resources/UserRegistration.py"], "/resources/EthnicCategory.py": ["/validation.py", "/models/EthinicCategoryModel.py"], "/resources/UserLogoutAccess.py": ["/models/RevokedTokenModel.py"], "/resources/UserLogin.py": ["/validation.py", "/models/UserModel.py"], "/resources/UserLogoutRefresh.py": ["/models/RevokedTokenModel.py"], "/resources/CheckService.py": ["/validation.py"], "/resources/UserRegistration.py": ["/validation.py", "/models/UserModel.py"]}
|
23,326,676
|
kush-singh-chb/FamilyCart
|
refs/heads/main
|
/models/Order.py
|
import os
from uuid import uuid4
import boto3
db = boto3.resource(service_name='dynamodb', region_name='eu-west-1',
aws_access_key_id=os.environ.get("aws_access_key_id"),
aws_secret_access_key=os.environ.get("aws_secret_access_key"))
class Order:
    __tablename__ = 'order'
    userID: str
    orderID = uuid4()
    products = []
    total: int
|
{"/resources.py": ["/validation.py", "/models.py"], "/run.py": ["/models.py", "/resources.py", "/models/RevokedTokenModel.py", "/resources/UnderConstruction.py", "/resources/CheckService.py", "/resources/EthnicCategory.py", "/resources/MainRoute.py", "/resources/UserLogin.py", "/resources/UserLogoutAccess.py", "/resources/UserLogoutRefresh.py", "/resources/UserRegistration.py"], "/resources/EthnicCategory.py": ["/validation.py", "/models/EthinicCategoryModel.py"], "/resources/UserLogoutAccess.py": ["/models/RevokedTokenModel.py"], "/resources/UserLogin.py": ["/validation.py", "/models/UserModel.py"], "/resources/UserLogoutRefresh.py": ["/models/RevokedTokenModel.py"], "/resources/CheckService.py": ["/validation.py"], "/resources/UserRegistration.py": ["/validation.py", "/models/UserModel.py"]}
|
23,326,677
|
kush-singh-chb/FamilyCart
|
refs/heads/main
|
/resources/CheckService.py
|
import json
import os
import re
import requests
from flask_jwt_extended import (create_access_token, jwt_refresh_token_required,
get_jwt_identity)
from flask_restful import Resource
import validation
from validation import check_service_validate
class TokenRefresh(Resource):
    @jwt_refresh_token_required
    def post(self):
        current_user = get_jwt_identity()
        access_token = create_access_token(identity=current_user)
        return {'access_token': access_token}
class CheckService(Resource):
    def post(self):
        data = check_service_validate().parse_args()
        # validation exposes no module-level eir_code_pattern; use its validate_eir() helper instead.
        if not validation.validate_eir(data['eircode']):
            return {'message': 'Invalid eircode'}, 422
        url = "https://graphhopper.com/api/1/route?"
        header = {
            "Content-Type": "application/json"
        }
        params = {
            "key": os.environ['grasshopper'],
        }
        data = {"points": [
            [
                -6.2782037,
                53.3407837
            ],
            [
                -6.2656517,
                53.3217209
            ]
        ],
            "vehicle": "car",
            "locale": "en",
            "elevation": False,
            "optimize": "false",
            "calc_points": True,
            "debug": False,
            "points_encoded": True,
            "ch.disable": True,
            "weighting": "fastest"
        }
        route_request = requests.post(url=url, params=params, data=json.dumps(data), headers=header)
        route_response = json.loads(route_request.text)
        if int(route_response['paths'][0]['distance']) / 1000.0 < 4.0:
            return {'message': 'Service available within {} kms'.format(
                int(route_response['paths'][0]['distance']) / 1000.0)}, 201
        else:
            return {'message': 'Service unavailable'}, 201
|
{"/resources.py": ["/validation.py", "/models.py"], "/run.py": ["/models.py", "/resources.py", "/models/RevokedTokenModel.py", "/resources/UnderConstruction.py", "/resources/CheckService.py", "/resources/EthnicCategory.py", "/resources/MainRoute.py", "/resources/UserLogin.py", "/resources/UserLogoutAccess.py", "/resources/UserLogoutRefresh.py", "/resources/UserRegistration.py"], "/resources/EthnicCategory.py": ["/validation.py", "/models/EthinicCategoryModel.py"], "/resources/UserLogoutAccess.py": ["/models/RevokedTokenModel.py"], "/resources/UserLogin.py": ["/validation.py", "/models/UserModel.py"], "/resources/UserLogoutRefresh.py": ["/models/RevokedTokenModel.py"], "/resources/CheckService.py": ["/validation.py"], "/resources/UserRegistration.py": ["/validation.py", "/models/UserModel.py"]}
|
23,326,678
|
kush-singh-chb/FamilyCart
|
refs/heads/main
|
/resources/MainRoute.py
|
from flask import render_template, make_response
from flask_restful import Resource
class MainRoute(Resource):
    def get(self):
        return make_response(render_template('swagger-ui.html'))
|
{"/resources.py": ["/validation.py", "/models.py"], "/run.py": ["/models.py", "/resources.py", "/models/RevokedTokenModel.py", "/resources/UnderConstruction.py", "/resources/CheckService.py", "/resources/EthnicCategory.py", "/resources/MainRoute.py", "/resources/UserLogin.py", "/resources/UserLogoutAccess.py", "/resources/UserLogoutRefresh.py", "/resources/UserRegistration.py"], "/resources/EthnicCategory.py": ["/validation.py", "/models/EthinicCategoryModel.py"], "/resources/UserLogoutAccess.py": ["/models/RevokedTokenModel.py"], "/resources/UserLogin.py": ["/validation.py", "/models/UserModel.py"], "/resources/UserLogoutRefresh.py": ["/models/RevokedTokenModel.py"], "/resources/CheckService.py": ["/validation.py"], "/resources/UserRegistration.py": ["/validation.py", "/models/UserModel.py"]}
|
23,326,679
|
kush-singh-chb/FamilyCart
|
refs/heads/main
|
/resources/UserRegistration.py
|
import re
from flask_jwt_extended import (create_access_token, create_refresh_token)
from flask_restful import Resource
import validation
from models.UserModel import UserModel
from validation import register_validate
class UserRegistration(Resource):
    def post(self):
        data = register_validate().parse_args()
        if not validation.validate_email(email=data['email']):
            return {'message': 'Invalid Email'}, 422
        if not re.match(r'^(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?=.*[a-zA-Z]).{8,}$', data['password']):
            return {'message': 'Stronger Password required'}, 422
        if UserModel.check_by_username(data['email']):
            return {'message': 'User {} already exists'.format(data['email'])}, 422
        else:
            new_user = UserModel()
            new_user.username = data['email']  # no trailing comma: username should be the string, not a 1-tuple
            new_user.firstname = data['first_name']
            new_user.password = UserModel.generate_hash(data['password'])
            try:
                new_user.save_to_db()
                access_token = create_access_token(identity=data['email'])  # identity is the full email, not its first character
                refresh_token = create_refresh_token(identity=data['email'])
                return {
                    'message': 'User {} was created'.format(data['email']),
                    'access_token': access_token,
                    'refresh_token': refresh_token
                }
            except:
                return {'message': 'Something went wrong'}, 500
|
{"/resources.py": ["/validation.py", "/models.py"], "/run.py": ["/models.py", "/resources.py", "/models/RevokedTokenModel.py", "/resources/UnderConstruction.py", "/resources/CheckService.py", "/resources/EthnicCategory.py", "/resources/MainRoute.py", "/resources/UserLogin.py", "/resources/UserLogoutAccess.py", "/resources/UserLogoutRefresh.py", "/resources/UserRegistration.py"], "/resources/EthnicCategory.py": ["/validation.py", "/models/EthinicCategoryModel.py"], "/resources/UserLogoutAccess.py": ["/models/RevokedTokenModel.py"], "/resources/UserLogin.py": ["/validation.py", "/models/UserModel.py"], "/resources/UserLogoutRefresh.py": ["/models/RevokedTokenModel.py"], "/resources/CheckService.py": ["/validation.py"], "/resources/UserRegistration.py": ["/validation.py", "/models/UserModel.py"]}
|
23,377,376
|
SoftwareUnderstanding/rolf
|
refs/heads/main
|
/src/Vectorizing/CountVectorizer.py
|
from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd
def getCountVectorizer(df : pd.DataFrame, textcolname : str):
    count_vect = CountVectorizer(analyzer='word', token_pattern=r'\w{1,}', max_features=10000)
    count_vect.fit(df[textcolname])
    print("count vectorizer done")
    return count_vect
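# Illustrative use (the 'Text' column name is assumed, matching the rest of this repository):
#   count_vect = getCountVectorizer(df, 'Text')
#   X_counts = count_vect.transform(df['Text'])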
|
{"/src/Experiments/best_model.py": ["/src/ResultStorage.py"], "/src/Experiments/best_sampler.py": ["/src/ResultStorage.py"]}
|
23,377,377
|
SoftwareUnderstanding/rolf
|
refs/heads/main
|
/src/util/utils.py
|
from typing import List
BASE_CATEGORIES = ["Astrophisics", "Audio", "Sequential","Graphs", "Reinforcement Learning", "Natural Language Processing", "Computer Vision"]
def getCategories(base_categories: List[str], all_categories: List[str], additional_categories: List[str]) -> List[str]:
"""
Considering base_categories and all the category inputs, returns the final category list.
Params
-----------
base_categories: (List[str]) List of base categories.
all_categories: (List[str]) List of all categories. If evaluated to True, this list will be returned.
additional_categories: (List[str]) List of additional categories. Added to base_categories if given and all_categories is evaluated to False.
Return
-----------
(List[str]) List of final categories.
"""
if all_categories:
return all_categories.copy()
elif additional_categories:
return base_categories.copy() + additional_categories.copy()
else:
return base_categories.copy()
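# Illustrative outcomes (hypothetical argument values):
#   getCategories(["Audio"], ["Computer Vision"], ["Graphs"])  -> ["Computer Vision"]   (all_categories wins)
#   getCategories(["Audio"], [], ["Graphs"])                   -> ["Audio", "Graphs"]
#   getCategories(["Audio"], [], [])                           -> ["Audio"]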
|
{"/src/Experiments/best_model.py": ["/src/ResultStorage.py"], "/src/Experiments/best_sampler.py": ["/src/ResultStorage.py"]}
|
23,377,378
|
SoftwareUnderstanding/rolf
|
refs/heads/main
|
/src/util/histograms.py
|
import pandas as pd
import matplotlib.pyplot as plt
df = pd.read_csv('data/readme_new_preprocessed_train.csv', sep=';')
#df.drop_duplicates('Text', inplace=True, keep=False)
print(df.shape)
#df = pd.read_csv('data/readme.csv', sep=';')
df.drop('Text', axis=1, inplace=True)
distribution = df.groupby('Label').count()
distribution.sort_values('Repo', ascending=[False], inplace=True)
print(distribution)
labels = [key for key, _ in distribution.iterrows()]
print(labels)
vals = distribution['Repo'].values
print(vals)
plt.figure(figsize=(30,15))
plt.bar(labels, vals, color='#00c7c3')
plt.title(f'Distribution of samples between categories', size=18, weight='bold')
plt.xlabel('Categories', size=16, weight='bold')
plt.ylabel('Frequency', size=16, weight='bold')
plt.xticks(rotation=0, size=16)
plt.yticks(size=14)
plt.legend(prop={'size': 14})
plt.savefig('results/pics/class_distribution_unique_train.png')
plt.close()
exit(0)
#plt.xticks(fontsize=4)
categories = ["Audio", "Computer Vision","Graphs", "Natural Language Processing", "Reinforcement Learning", "Sequential"]
colors = ['red', 'blue', 'pink', 'purple', 'yellow', 'green']
def create_hist(df):
    plt.figure(figsize=(20, 10))
    df = df[['PipelineID', 'f1-score_overall']]
    for i in range(len(categories)):
        df1 = df[df['PipelineID'].str.contains(categories[i])]['f1-score_overall'].astype(str).str[:4].astype(float)
        print(df1[:4])
        plt.hist(df1, bins=10, color=colors[i], alpha=0.7, label=categories[i], width=0.01)
    plt.xlim(xmin=0.0, xmax = 1.0)
    plt.title('Final results structured random undersampling, preprocessed')
    plt.legend()
df = pd.read_csv('final_results_structured_sampling_preprocessed.csv', delimiter=';')
create_hist(df)
plt.savefig('results/pics/results_structured_sampling_preprocessed.csv.png')
plt.show()
|
{"/src/Experiments/best_model.py": ["/src/ResultStorage.py"], "/src/Experiments/best_sampler.py": ["/src/ResultStorage.py"]}
|
23,377,379
|
SoftwareUnderstanding/rolf
|
refs/heads/main
|
/src/Evaluation/prediction.py
|
import argparse
import csv
from pathlib import Path
import pickle
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
from bert.tokenization import bert_tokenization as tokenization
class Predictor:
"""
This class is used to run predictions with given models on given datasets and save the results. Also collects all the labels.
Methods
----------
predict: Used to load the given models, run the predictions and save the results.
saveData: Used to save the prediction data.
Attributes
----------
models_path: (Path) Path to the folder containing the models used to predict.
data_path: (str) Path to the csv file containing the samples.
out_data: (Dict) Used to store the predictions.
out_path: (Path) Path to the output file. Defaults to {models_path}/predictions/{models_folder}.csv
"""
def __init__(self, models_path: str, data_path: str, out_path: str = None, bert: bool = False):
"""
Params
--------
models_path: (Path) Path to the folder containing the models used to predict.
data_path: (str) Path to the csv file containing the samples.
out_path: (Path) Path to the output file. Defaults to {models_path}/predictions/{models_folder}.csv
"""
self.models_path = Path(models_path)
self.data_path = data_path
self.bert = bert
if out_path is not None:
self.out_path = Path(out_path)
else:
model_path_path = Path(models_path)
model_path_path = model_path_path / f'predictions/{model_path_path.name}.csv'
model_path_path.parent.mkdir(parents=True, exist_ok=True)
self.out_path = model_path_path
self.out_path.parent.mkdir(parents=True, exist_ok=True)
self.out_data = {}
def __loadModels(self) -> None:
"""
Loads and stores all the models from the given folder.
"""
self.__models = []
self.model_labels = []
for model_file in self.models_path.iterdir():
if self.bert:
self.__models.append(tf.keras.models.load_model(model_file.absolute().as_posix(), custom_objects={'KerasLayer': hub.KerasLayer}))
else:
with open(model_file, 'rb') as f:
self.model_labels.append(model_file.name.split('.')[0].replace('_', ' '))
self.__models.append(pickle.load(f))
def saveData(self) -> None:
"""
Saves the predictions into {self.out_path} csv file with ['Labels', 'Repo', 'Predictions'] columns.
"""
with open(self.out_path, 'w') as f:
writer = csv.DictWriter(f, delimiter=';', fieldnames=['Labels', 'Repo', 'Predictions', 'Probabilities'])
writer.writeheader()
for key, val in self.out_data.items():
writer.writerow({
'Labels': ','.join(val['Labels']),
'Repo': key,
'Predictions': ','.join(val['Predictions']),
'Probabilities': ','.join(val['Probabilities']),
})
def encoding_the_text(self, texts, tokenizer, max_len=300):
all_tokens = []
all_masks = []
all_segments = []
for text in texts:
text = tokenizer.tokenize(text)
text = text[:max_len-2]
input_sequence = ["[CLS]"] + text + ["[SEP]"]
pad_len = max_len-len(input_sequence)
tokens = tokenizer.convert_tokens_to_ids(input_sequence) + [0] * pad_len
pad_masks = [1] * len(input_sequence) + [0] * pad_len
segment_ids = [0] * max_len
all_tokens.append(tokens)
all_masks.append(pad_masks)
all_segments.append(segment_ids)
return np.array(all_tokens), np.array(all_masks), np.array(all_segments)
def getBertTokenizer(self):
m_url = 'https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/2'
bert_layer = hub.KerasLayer(m_url, trainable=True)
tf.gfile = tf.io.gfile
vocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy()
do_lower_case = bert_layer.resolved_object.do_lower_case.numpy()
return tokenization.FullTokenizer(vocab_file, do_lower_case)
def getLabelsFromPrediction(self, prediction) -> set[str]:
categories_alphabetical = ['Audio', 'Computer Vision', 'Graphs', 'Natural Language Processing', 'Reinforcement Learning', 'Sequential']
print(prediction)
above_thresholds_indices = np.argwhere(prediction > 0.5)
res = []
if above_thresholds_indices.size > 0:  # a bare multi-element ndarray in a boolean test raises ValueError
for ind in above_thresholds_indices:
res.append(categories_alphabetical[ind[0]])
return set(res)
def predict(self):
"""
Loads and stores all the models from the given folder.\n
Runs and collects the predictions on the given samples.\n
Also collects and merges all the labels for the repositories.\n
Saves the predictions by calling saveData().
"""
self.__loadModels()
self.out_data = {}
if self.bert:
tokenizer = self.getBertTokenizer()
with open(self.data_path) as f:
reader = csv.DictReader(f, delimiter=';')
text_data = [row for row in reader]
texts = [row['Text'] for row in text_data]
text = self.encoding_the_text(texts, tokenizer)
print(len(texts), len(text))
predictions = [model.predict(text) for model in self.__models]
for i, row in enumerate(text_data):
repo = row['Repo']
if repo in self.out_data:
self.out_data[repo]['Labels'].add(row['Label'])
else:
self.out_data[repo] = {'Labels' : set([row['Label']]), 'Predictions': set()}
for prediction in predictions:
labels = self.getLabelsFromPrediction(prediction[i])
self.out_data[repo]['Probabilities'] = prediction[i]
print(labels)
self.out_data[repo]['Predictions'].update(labels)
else:
with open(self.data_path) as f:
reader = csv.DictReader(f, delimiter=';')
for row in reader:
repo = row['Repo']
if repo in self.out_data:
self.out_data[repo]['Labels'].add(row['Label'])
else:
self.out_data[repo] = {'Labels' : set([row['Label']]), 'Predictions': set(), 'Probabilities': []}
for model_ind, model in enumerate(self.__models):
pred = model.predict([row['Text']])
pred_proba = model.predict_proba([row['Text']])
model_label = self.model_labels[model_ind]
#print(f'{model_label=}')
#print(pred, pred_proba)
if 'Other' > model_label:
self.out_data[repo]['Probabilities'].append(str(pred_proba[0][0]))
else:
self.out_data[repo]['Probabilities'].append(str(pred_proba[0][1]))
if pred != 'Other':
self.out_data[repo]['Predictions'].add(pred[0])
self.saveData()
if __name__ == '__main__':
parser_predict = argparse.ArgumentParser('python src/Evaluation/prediction.py', description='Predict with the given models.')
parser_predict.add_argument('--inputfolder', required=True, help='Path of folder with the models.')
parser_predict.add_argument('--test_set', required=True, help='Name of the csv file containing the test set.')
parser_predict.add_argument('--outfile', required=True, help='Path to output csv file with the results.')
parser_predict.add_argument('--bert', action=argparse.BooleanOptionalAction)
args = parser_predict.parse_args()
predictor = Predictor(args.inputfolder, args.test_set, args.outfile, args.bert).predict()
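# Example invocation (paths are illustrative; the flags come from the argparse definition above):
#   python src/Evaluation/prediction.py --inputfolder results/models/best \
#          --test_set data/readme_test.csv --outfile results/predictions/best.csv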
|
{"/src/Experiments/best_model.py": ["/src/ResultStorage.py"], "/src/Experiments/best_sampler.py": ["/src/ResultStorage.py"]}
|
23,377,380
|
SoftwareUnderstanding/rolf
|
refs/heads/main
|
/src/Report/CrossValidateNN.py
|
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
import tensorflow as tf
from tensorflow.keras.utils import to_categorical
import numpy as np
import pandas as pd
import os
from time import time  # used below to time model fitting (fit_end)
from tensorflow.keras import backend as K  # K.clear_session() is called after each fold
def cross_validate_NN(model, X, y, X_test, y_test,name="NN", fit_params=None, scoring=None, n_splits=5, save=True, batch_size = 32, use_multiprocessing=True):
'''
Function create a metric report automatically with cross_validate function.
@param model: (model) neural network model
@param X: (list or matrix or tensor) training X data
@param y: (list) label data
@param X_test: (list or matrix or tensor) testing X data
@param y_test: (list) label test data
@param name: (string) name of the model (default classifier)
@param fit_aparams: (dict) add parameters for model fitting
@param scoring: (dict) dictionary of metrics and names
@param n_splits: (int) number of fold for cross-validation (default 5)
@return: (pandas.dataframe) dataframe containing all the results of the metrics
for each fold and the mean and std for each of them
'''
# ---- Parameters initialisation
es = tf.keras.callbacks.EarlyStopping(monitor='loss', mode='auto', patience=3)
seed = 42
k = 1
np.random.seed(seed)
kfold = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed)
# Creation of list for each metric
if scoring==None: # create a dictionary if none is passed
dic_score = {}
if scoring!=None: # save the dict
dic_score = scoring.copy()
dic_score["fit_time"] = None # initialisation for time fitting and scoring
dic_score["score_time"] = None
scorer = {}
for i in dic_score.keys():
scorer[i] = []
index = ["Model"]
results = [name]
# ---- Loop on k-fold for cross-valisation
for train, test in kfold.split(X, y): # training NN on each fold
# create model
print(f"k-fold : {k}")
_model = tf.keras.models.clone_model(model)
if len(np.unique(y))==2: # binary
_model.compile(optimizer='adam',
loss=tf.losses.BinaryCrossentropy(from_logits=True),
metrics=['accuracy'])
else: # multiclass
_model.compile(optimizer='adam',
loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
fit_start = time()
_model.fit(X[train], y[train],
epochs=1000, callbacks=[es], validation_data=(X[test], y[test]),
verbose=False, batch_size = batch_size, use_multiprocessing=use_multiprocessing)
fit_end = time() - fit_start  # elapsed fit time; fit_end was printed below but never assigned
y_pred = (_model.predict(X[test])>0.5).astype(int)
#if len(set(y))>2:
# y_pred =np.argmax(y_pred,axis=1)
#print(y_test[0], y_pred[0])
if len(set(y))==2:
print(f"Precision: {round(100*precision_score(y[test], y_pred), 3)}% , Recall: {round(100*recall_score(y[test], y_pred), 3)}%, Time \t {round(fit_end, 4)} ms")
else:
print(f"Precision: {round(100*precision_score(y[test], np.argmax(y_pred,axis=1), average='weighted'), 3)}% , Recall: \
{round(100*recall_score(y[test], np.argmax(y_pred,axis=1), average='weighted'), 3)}%, Time \t {round(fit_end, 4)} ms")
# ---- save each metric
for i in dic_score.keys(): # compute metrics
if i == "fit_time":
index.append(i+'_cv'+str(k))
continue
if i == "score_time":
index.append(i+'_cv'+str(k))
continue
if len(set(y))>2:
if i in ["prec", "recall", "f1-score"]:
scorer[i].append(dic_score[i](y[test], np.argmax(y_pred,axis=1), average = 'weighted')) # make each function scorer
elif i=="roc_auc":
scorer[i].append(dic_score[i](to_categorical(y[test]), y_pred, average = 'macro', multi_class="ovo")) # make each function scorer
else:
scorer[i].append(dic_score[i]( y[test], np.argmax(y_pred,axis=1))) # make each function scorer
else:
scorer[i].append(dic_score[i]( y[test], y_pred)) # make each function scorer
#scorer[i].append(dic_score[i]( y[test], y_pred))
index.append("test_"+i+'_cv'+str(k))
results.append(scorer[i][-1])
K.clear_session()
del _model
k+=1
# Train test on the overall data
print("Overall train-test data")
_model = tf.keras.models.clone_model(model)
if len(np.unique(y))==2: # binary
_model.compile(optimizer='adam',
loss=tf.losses.BinaryCrossentropy(from_logits=True),
metrics=['accuracy'])
else: # multiclass
_model.compile(optimizer='adam',
loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
_model.fit(X[train], y[train],
epochs=1000, callbacks=[es], validation_data=(X[test], y[test]),
verbose=False)
fit_start = time()
if save:
check_p = tf.keras.callbacks.ModelCheckpoint('results/models/lstm.h5', save_best_only=True)
_model.fit(X, y,epochs=1000, callbacks=[es, check_p], validation_split=0.2, batch_size = batch_size,
verbose=False, use_multiprocessing=use_multiprocessing)
else:
_model.fit(X, y,epochs=1000, callbacks=[es], validation_split=0.2, batch_size = batch_size,
verbose=False, use_multiprocessing=use_multiprocessing)
fit_end = time() - fit_start  # elapsed fit time on the full training data
#_acc = _model.evaluate(X_test, y_test, verbose=0)
y_pred = (_model.predict(X_test)>0.5).astype(int)
#if len(set(y))>2:
# y_pred =np.argmax(y_pred,axis=1)
if len(set(y))==2:
print(f"Precision: {round(100*precision_score(y_test, y_pred), 3)}% , Recall: {round(100*recall_score(y_test, y_pred), 3)}%, Time \t {round(fit_end, 4)} ms")
else:
print(f"Precision: {round(100*precision_score(y_test, np.argmax(y_pred,axis=1), average='weighted'), 3)}% , Recall: \
{round(100*recall_score(y_test, np.argmax(y_pred,axis=1), average='weighted'), 3)}%, Time \t {round(fit_end, 4)} ms")
# Compute mean and std for each metric
for i in scorer:
results.append(np.mean(scorer[i]))
results.append(np.std(scorer[i]))
if i == "fit_time":
index.append(i+"_mean")
index.append(i+"_std")
continue
if i == "score_time":
index.append(i+"_mean")
index.append(i+"_std")
continue
index.append("test_"+i+"_mean")
index.append("test_"+i+"_std")
# add metrics averall dataset on the dictionary
for i in dic_score.keys(): # compute metrics
if i == "fit_time":
index.append(i+'_overall')
continue
if i == "score_time":
index.append(i+'_overall')
continue
if len(set(y))>2:
if i in ["prec", "recall", "f1-score"]:
scorer[i].append(dic_score[i](y_test, np.argmax(y_pred,axis=1), average = 'weighted')) # make each function scorer
elif i=="roc_auc":
scorer[i].append(dic_score[i](to_categorical(y_test), y_pred, average = 'weighted', multi_class="ovo")) # make each function scorer
else:
scorer[i].append(dic_score[i]( y_test, np.argmax(y_pred,axis=1))) # make each function scorer
else:
#scorer[i].append(dic_score[i]( y[test], y_pred))
scorer[i].append(dic_score[i](_model, X_test, y_test))
index.append(i+'_overall')
results.append(scorer[i][-1])
return pd.DataFrame(results, index=index).T
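# Usage sketch (names below are illustrative, not from this repository): `scoring` maps metric names to callables.
# Inside the CV loop each callable is invoked as metric(y_true, y_pred); for multiclass data the keys
# "prec", "recall" and "f1-score" are additionally passed average='weighted'.
#   from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
#   scoring = {"acc": accuracy_score, "prec": precision_score, "recall": recall_score, "f1-score": f1_score}
#   report_df = cross_validate_NN(keras_model, X_train, y_train, X_test, y_test, name="LSTM", scoring=scoring)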
|
{"/src/Experiments/best_model.py": ["/src/ResultStorage.py"], "/src/Experiments/best_sampler.py": ["/src/ResultStorage.py"]}
|
23,377,381
|
SoftwareUnderstanding/rolf
|
refs/heads/main
|
/src/train.py
|
#from cgi import print_environ
#from time import time
from pathlib import Path
from typing import Dict, List
import pandas as pd
#from sklearn import datasets
#from Preprocessor import Preprocessor
#from sklearn.preprocessing import LabelEncoder
import numpy as np
import csv
from sklearn.linear_model import LogisticRegression
from imblearn.under_sampling import RandomUnderSampler
#from sklearn.utils.class_weight import compute_class_weight
from sklearn.svm import SVC, LinearSVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
#from sklearn.linear_model import SGDClassifier
#from sklearn.ensemble import AdaBoostClassifier
#from sklearn.naive_bayes import MultinomialNB
from ResultStorage import ResultStorage
#from tqdm import tqdm
#import fasttext
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
#from Vectorizing.CountVectorizer import getCountVectorizer
#from Vectorizing.TF_IDF_Vectorizer import getWordLevelVectorizer, getNGramLevelVectorizer
#from Embedding.WordEmbedding import createWordEmbedding
from Report import Report
#from Report.CrossValidateNN import cross_validate_NN
#from lstmModel import create_lstm_model, create_bidirec_lstm_model
from sklearn.calibration import CalibratedClassifierCV
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import StandardScaler
import logthis
TEXT = "Text"
LABEL = "Label"
CV_splits = 5
def filter_dataframe(df: pd.DataFrame, category: str) -> None:
for ind, row in df.iterrows():
if category != str(row[LABEL]):
row[LABEL] = 'Other'
#print(f'{cat} filtered {count} rows in training dataset')
def get_sampling_strategy(df_train: pd.DataFrame, categories: List[str], cat: str) -> Dict[str, int]:
df =df_train.drop(df_train[df_train[LABEL] == 'General'].index)
sizes = df.groupby(LABEL).size()
indexes = list(sizes.index)
cat_size = sizes[indexes.index(cat)]
# If the category is bigger than the sum of the other categories
#print(df[df[LABEL] != 'Other'].groupby(LABEL).size().sum())
if cat_size > df.groupby(LABEL).size().sum() - cat_size:
cat_size = df.groupby(LABEL).size().sum() - cat_size
other_cat_size = int(cat_size/(len(categories)+1))+1
sampling_strategy = {}
change = 0
for c in categories+['Other', 'General']:
if c == cat:
sampling_strategy[c] = cat_size
elif c not in categories+['Other']:
sampling_strategy[c] = 0
else:
s = other_cat_size+change
sampling_strategy[c] = min(s, sizes[indexes.index(c)])
change = 0
if sampling_strategy[c] < s:
change += s - sampling_strategy[c]
if sizes[indexes.index(c)] < other_cat_size:
change += other_cat_size - sizes[indexes.index(c)]
else:
change += 0
logthis.say(f'Sampling strategy: {str(sampling_strategy)}',)
return sampling_strategy
def train_models(train: str, out_folder: str, results_file:str, categories: List[str] = None, evaluation_metric: str = "test_f1-score_mean") -> None:
if categories is None:
categories = ["Sequential", "Audio", "Computer Vision","Graphs", "Reinforcement Learning", "Natural Language Processing", "Astrophisics"]
print(categories)
logthis.say(f'Read files\nTrain dataset: {train}')
df_train = pd.read_csv(train, sep=';')
#df_test.drop_duplicates(subset=['Text'], inplace=True, keep='last')
df_train = df_train.drop(columns = 'Repo')
logthis.say('Read done')
for i, cat in enumerate(categories):
ind = i + 1
logthis.say(f'Train test split starts for {cat=} category {ind}/{len(categories)}')
df_train = pd.read_csv(train, sep=';')
#df_test.drop_duplicates(subset=['Text'], inplace=True, keep='last')
df_train = df_train.drop(columns = 'Repo')
x_train = df_train[TEXT].astype('U')
y_train = df_train[LABEL]
#undersample = RandomUnderSampler(sampling_strategy=get_sampling_strategy(df_train, categories, cat))
#undersample = RandomUnderSampler(sampling_strategy='majority')
#x_train, y_train = undersample.fit_resample(x_train.to_frame(TEXT), y_train)
logthis.say(f'Filter starts for {cat=} category {ind}/{len(categories)}')
y_train = y_train.to_frame(LABEL)
print(y_train['Label'].unique())
filter_dataframe(y_train, cat)
print(y_train['Label'].unique())
y_train = np.ravel(y_train)
logthis.say(f'Filtering done')
#logthis.say('Other: ' + str(np.count_nonzero(y_train == 'Other')))
#logthis.say('CV: ' + str(np.count_nonzero(y_train == 'Computer Vision')))
undersample = RandomUnderSampler(sampling_strategy='majority')
x_train, y_train = undersample.fit_resample(x_train.to_frame(TEXT), y_train)
#countvect = CountVectorizer(analyzer="word",token_pattern=r'\w{1,}', max_features=10000, lowercase=True)
#tfidf = TfidfVectorizer(analyzer="word",token_pattern=r'\w{1,}', max_features=10000, lowercase=True)
result_storage = ResultStorage(cat, evaluation_metric)
logthis.say(f'Logistic regression starts for {cat=} category {ind}/{len(categories)}')
pipeline = Pipeline([
('countvect', CountVectorizer(max_df=0.9, min_df=0.1, max_features=None, ngram_range=(1, 1))),
('lr', LogisticRegression(max_iter=100000, C=10, penalty='l2', random_state=1))])
result_storage.processResult(*Report.report(pipeline, train, x_train[TEXT], y_train, cat, name='LR_Count_Vectors_RandomUnder', cv=CV_splits, dict_scoring=Report.score_metrics, save=False))
#logthis.say('###################################################')
#logthis.say(pipeline.get_params())
#logthis.say('###################################################')
pipeline = Pipeline([
('tfidf', TfidfVectorizer(max_df=0.9, min_df=0.1, max_features=None, ngram_range=(1, 1))),
('lr', LogisticRegression(max_iter=10000, C=10, penalty='l2', random_state=1))])
result_storage.processResult(*Report.report(pipeline, train, x_train[TEXT], y_train, cat, name='LR_TFIDF_RandomUnder', cv=CV_splits, dict_scoring=Report.score_metrics, save=False))
#logthis.say('###################################################')
#logthis.say(pipeline.get_params())
#logthis.say('###################################################')
logthis.say(f'SVC starts for {cat=} category {ind}/{len(categories)}')
pipeline = Pipeline([
('countvect', CountVectorizer(max_df=0.9, min_df=0.1, max_features=None, ngram_range=(1, 1))),
('svc', SVC(probability=True, C=100, gamma=0.1, kernel='rbf'))])
result_storage.processResult(*Report.report(pipeline, train, x_train[TEXT], y_train, cat, name='SVC_Count_Vectors_RandomUnder', cv=CV_splits, dict_scoring=Report.score_metrics, save=False))
#logthis.say('###################################################')
#logthis.say(pipeline.get_params())
#logthis.say('###################################################')
pipeline = Pipeline([
('tfidf', TfidfVectorizer(max_df=0.9, min_df=0.1, max_features=None, ngram_range=(1, 1))),
('svc', SVC(probability=True, C=100, gamma=0.1, kernel='rbf'))])
result_storage.processResult(*Report.report(pipeline, train, x_train[TEXT], y_train, cat, name='SVC_TFIDF_RandomUnder', cv=CV_splits, dict_scoring=Report.score_metrics, save=False))
#logthis.say('###################################################')
#logthis.say(pipeline.get_params())
#logthis.say('###################################################')
logthis.say(f'KNeighborsClassifier starts for {cat=} category {ind}/{len(categories)}')
pipeline = Pipeline([
('countvect', CountVectorizer(max_df=0.9, min_df=0.1, max_features=None, ngram_range=(1, 1))),
('knn', KNeighborsClassifier(leaf_size=40, metric='minkowski', n_neighbors=20, p=2, weights='distance'))])
result_storage.processResult(*Report.report(pipeline, train, x_train[TEXT], y_train, cat, name='KNN_Count_Vectors_RandomUnder', cv=CV_splits, dict_scoring=Report.score_metrics, save=False))
#logthis.say('###################################################')
#logthis.say(pipeline.get_params())
#logthis.say('###################################################')
pipeline = Pipeline([
('tfidf', TfidfVectorizer(max_df=0.9, min_df=0.1, max_features=None, ngram_range=(1, 1))),
('knn', KNeighborsClassifier(leaf_size=40, metric='minkowski', n_neighbors=20, p=2, weights='distance'))])
result_storage.processResult(*Report.report(pipeline, train, x_train[TEXT], y_train, cat, name='KNN_TFIDF_RandomUnder', cv=CV_splits, dict_scoring=Report.score_metrics, save=False))
#logthis.say('###################################################')
#logthis.say(pipeline.get_params())
#logthis.say('###################################################')
logthis.say(f'RandomForestClassifier starts for {cat=} category {ind}/{len(categories)}')
pipeline = Pipeline([
('countvect', CountVectorizer(max_df=0.9, min_df=0.1, max_features=None, ngram_range=(1, 1))),
('random_forest', RandomForestClassifier(max_features='sqrt', random_state=42, bootstrap=False, max_depth=None, min_samples_leaf=1, min_samples_split=5, n_estimators=100))])
result_storage.processResult(*Report.report(pipeline, train, x_train[TEXT], y_train, cat, name='RandomForestClassifier_Count_Vectors_RandomUnder', cv=CV_splits, dict_scoring=Report.score_metrics, save=False))
#logthis.say('###################################################')
#logthis.say(pipeline.get_params())
#logthis.say('###################################################')
pipeline = Pipeline([
('tfidf', TfidfVectorizer(max_df=0.9, min_df=0.1, max_features=None, ngram_range=(1, 1))),
('random_forest', RandomForestClassifier(max_features='sqrt', random_state=42, bootstrap=False, max_depth=None, min_samples_leaf=1, min_samples_split=5, n_estimators=100))])
result_storage.processResult(*Report.report(pipeline, train, x_train[TEXT], y_train, cat, name='RandomForestClassifier_TFIDF_RandomUnder', cv=CV_splits, dict_scoring=Report.score_metrics, save=False))
#logthis.say('###################################################')
#logthis.say(pipeline.get_params())
#logthis.say('###################################################')
logthis.say(f'LinearSVC starts for {cat=} category {ind}/{len(categories)}')
pipeline = Pipeline([
('countvect', CountVectorizer(max_df=0.9, min_df=0.1, max_features=None, ngram_range=(1, 1))),
('linear_svc', LinearSVC(C=0.0001, dual=False))])
result_storage.processResult(*Report.report(pipeline, train, x_train[TEXT], y_train, cat, name='Linear_SVC_Count_Vectors_RandomUnder', cv=CV_splits, dict_scoring=Report.score_metrics, save=False))
#logthis.say('###################################################')
#logthis.say(pipeline.get_params())
#logthis.say('###################################################')
pipeline = Pipeline([
('tfidf', TfidfVectorizer(max_df=0.9, min_df=0.1, max_features=None, ngram_range=(1, 1))),
('linear_svc', LinearSVC(C=0.0001, dual=False))])
result_storage.processResult(*Report.report(pipeline, train, x_train[TEXT], y_train, cat, name='Linear_SVC_TFIDF_RandomUnder', cv=CV_splits, dict_scoring=Report.score_metrics, save=False))
#logthis.say('###################################################')
#logthis.say(pipeline.get_params())
#logthis.say('###################################################')
result_storage.dumpBestModel(out_folder)
result_storage.dumpResults(results_file)
|
{"/src/Experiments/best_model.py": ["/src/ResultStorage.py"], "/src/Experiments/best_sampler.py": ["/src/ResultStorage.py"]}
|
23,377,382
|
SoftwareUnderstanding/rolf
|
refs/heads/main
|
/src/util/count_word_occurences.py
|
import csv
import matplotlib.pyplot as plt
from collections import defaultdict, Counter
import argparse
from pathlib import Path
import sys
csv.field_size_limit(sys.maxsize)
def countWordOccurences(filename: str, limit: int, output_folder: str, cat: str):
    output_path = Path(output_folder)
    output_path.mkdir(parents=True, exist_ok=True)
    categories = defaultdict(lambda : Counter())
    sample_count = Counter()
    reader = csv.DictReader(open(filename), delimiter=';')
    print(cat)
    for row in reader:
        if cat != 'all':
            if row['Label'] == cat:
                categories['Label'].update(row['Text'].split(' '))
                sample_count['Label'] += 1
        else:
            categories['Label'].update(row['Text'].split(' '))
            sample_count['Label'] += 1
    for category, counter in categories.items():
        most_commons = counter.most_common(limit)
        keys = [key[0] for key in most_commons]
        values = [key[1] for key in most_commons]
        plt.figure(figsize=(20,15))
        plt.bar(keys, values, color='#00c7c3')
        plt.title(f'Most frequent words in preprocessed readmes for '+cat+' category', size=18, weight='bold')
        plt.xlabel('Words', size=16, weight='bold')
        plt.ylabel('Frequency', size=16, weight='bold')
        plt.axhline(y = sample_count[category], color = '#ff556b', linestyle = '-', label='Number of samples')
        plt.xticks(rotation=40, size=16)
        plt.yticks(size=14)
        plt.legend(prop={'size': 14})
        plt.savefig(output_path / f'{cat.lower().replace(" ", "_")}.svg')
        plt.close()
if __name__ == "__main__":
    parser = argparse.ArgumentParser("python src/util/count_word_occurences.py", description='Count word occurences.')
    parser.add_argument('filename', help='name of the file with the preprocessed data')
    parser.add_argument('--limit', type=int, default=20, required=False, help='Set the limit of the common words. (default 20)')
    parser.add_argument('--outFolder', type=str, default='results/pics/', required=False, help='Give the folder for the output. (default "results/")')
    parser.add_argument('--category', type=str, default='all', required=False, help='Define which category to do the analysis for')
    args = parser.parse_args()
    if args.category:
        countWordOccurences(args.filename, args.limit, args.outFolder, args.category)
    else:
        countWordOccurences(args.filename, args.limit, args.outFolder)
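# Example invocation (the input file name is illustrative):
#   python src/util/count_word_occurences.py data/readme_new_preprocessed_train.csv --limit 20 --category "Computer Vision"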
|
{"/src/Experiments/best_model.py": ["/src/ResultStorage.py"], "/src/Experiments/best_sampler.py": ["/src/ResultStorage.py"]}
|
23,377,383
|
SoftwareUnderstanding/rolf
|
refs/heads/main
|
/src/Experiments/best_model.py
|
from cgi import print_environ
from time import time
from typing import List
from sklearn.model_selection import GridSearchCV
import pandas as pd
from sklearn import datasets
from sklearn.preprocessing import LabelEncoder
import numpy as np
import csv
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
from imblearn.under_sampling import RandomUnderSampler
from sklearn.utils.class_weight import compute_class_weight
from sklearn.svm import SVC, LinearSVC, SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.naive_bayes import MultinomialNB
from tqdm import tqdm
import fasttext
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.calibration import CalibratedClassifierCV
import logthis
from src.ResultStorage import ResultStorage
TEXT = "Text"
LABEL = "Label"
CV_splits = 5
def filter_dataframe(df, cat):
#count = 0
for ind, row in df.iterrows():
if cat != str(row[LABEL]):
#count += 1
row[LABEL] = 'Other'
#print(f'{cat} filtered {count} rows in training dataset')
def get_sampling_strategy(df_train, categories: list, cat: str):
sizes = df_train.groupby(LABEL).size()
indexes = list(sizes.index)
cat_size = sizes[indexes.index(cat)]
# If the category is bigger than the sum of the other categories
other_cat_size = int(cat_size/(len(df_train[LABEL].unique())-2))+1
if cat_size > df_train.groupby(LABEL).size().sum() - cat_size:
cat_size = df_train.groupby(LABEL).size().sum() - cat_size
sampling_stratgy = {}
change = 0
for c in df_train[LABEL].unique():
if c == cat:
sampling_stratgy[c] = cat_size
elif c not in categories:
sampling_stratgy[c] = 0
else:
if sizes[indexes.index(c)] < other_cat_size:
change = other_cat_size - sizes[indexes.index(c)]
else:
change = 0
sampling_stratgy[c] = min(other_cat_size+change, sizes[indexes.index(c)])
logthis.say(f'Sampling strategy: {str(sampling_stratgy)}',)
return sampling_stratgy
def train_models(train: str, test: str, out_folder: str, results_file:str, categories: List[str] = None, evaluation_metric: str = "test_f1-score_mean") -> None:
if categories is None:
categories = ["Natural Language Processing", "Computer Vision", "Sequential", "Audio", "Graphs", "Reinforcement Learning"]
logthis.say(f'Read files\nTrain dataset: {train} \nTest dataset: {test}')
df_train = pd.read_csv(train, sep=';')
df_test = pd.read_csv(test, sep = ';')
#df_test.drop_duplicates(subset=['Text'], inplace=True, keep='last')
df_train = df_train.drop(columns = 'Repo')
logthis.say('Read done')
for i, cat in enumerate(categories):
ind = i + 1
logthis.say(f'Train test split starts for {cat=} category {ind}/{len(categories)}')
x_train = df_train[TEXT].astype('U')
y_train = df_train[LABEL]
x_test = df_test[TEXT].astype('U')
y_test = df_test[LABEL]
#undersample = RandomUnderSampler(sampling_strategy=get_sampling_strategy(df_train, categories, cat))
undersample = RandomUnderSampler(sampling_strategy='majority')
x_train, y_train = undersample.fit_resample(x_train.to_frame(TEXT), y_train)
get_sampling_strategy(y_train.to_frame(LABEL), categories, cat)
logthis.say(f'Filter starts for {cat=} category {ind}/{len(categories)}')
y_train = y_train.to_frame(LABEL)
filter_dataframe(y_train, cat)
y_test = y_test.to_frame(LABEL)
filter_dataframe(y_test, cat)
y_train = np.ravel(y_train)
y_test = np.ravel(y_test)
logthis.say(f'Filtering done')
result_storage = ResultStorage(train, test, cat, evaluation_metric)
logthis.say(f'Logistic regression starts for {cat=} category {ind}/{len(categories)}')
pipeline = Pipeline([
('tfidf', TfidfVectorizer(max_df=1.0, min_df=0.0, max_features=None, ngram_range=(1, 2))),
('lr', LogisticRegression(max_iter=90000, random_state=1))])
param_grid = {
'lr__penalty': ['l1', 'l2'],
'lr__C': [1, 2, 4, 5, 7, 9, 10]
}
logthis.say(pipeline.get_params().keys())
search = GridSearchCV(pipeline, param_grid, n_jobs=-1, verbose=2, error_score=-1, return_train_score=True, refit='f1_weighted', scoring=['accuracy', 'f1_macro', 'f1_weighted', 'recall_macro', 'recall_weighted', 'precision_macro', 'precision_weighted'])
search.fit(x_train[TEXT], y_train)
logthis.say("Best parameter (CV score=%0.3f):" % search.best_score_)
logthis.say(search.best_params_)
df = pd.DataFrame(search.cv_results_)
df.to_csv('data/search/model_lr_search_'+ cat + '.csv', sep=';')
logthis.say(f'SVC starts for {cat=} category {ind}/{len(categories)}')
pipeline = Pipeline([
('tfidf', TfidfVectorizer(max_df=1.0, min_df=0.0, max_features=None, ngram_range=(1, 2))),
('svc', SVC(probability=True))])
param_grid = {
'svc__C':[1,10,100,1000],
'svc__gamma':[1,0.1,0.001,0.0001],
'svc__kernel':['linear','rbf']
}
logthis.say(pipeline.get_params().keys())
search = GridSearchCV(pipeline, param_grid, n_jobs=-1, verbose=2, error_score=-1, return_train_score=True, refit='f1_weighted', scoring=['accuracy', 'f1_macro', 'f1_weighted', 'recall_macro', 'recall_weighted', 'precision_macro', 'precision_weighted'])
search.fit(x_train[TEXT], y_train)
logthis.say("Best parameter (CV score=%0.3f):" % search.best_score_)
logthis.say(search.best_params_)
df = pd.DataFrame(search.cv_results_)
df.to_csv('data/search/model_svc_search_'+ cat + '.csv', sep=';')
logthis.say(f'KNeighborsClassifier starts for {cat=} category {ind}/{len(categories)}')
pipeline = Pipeline([
('tfidf', TfidfVectorizer(max_df=1.0, min_df=0.0, max_features=None, ngram_range=(1, 2))),
('knn', KNeighborsClassifier(n_neighbors=20, weights='distance', n_jobs=-1))])
param_grid = {
'knn__n_neighbors': (1,10,20, 30),
'knn__leaf_size': (20,40,1),
'knn__p': (1,2),
'knn__weights': ('uniform', 'distance'),
'knn__metric': ('minkowski', 'chebyshev')
}
logthis.say(pipeline.get_params().keys())
search = GridSearchCV(pipeline, param_grid, n_jobs=-1, verbose=2, error_score=-1, return_train_score=True, refit='f1_weighted', scoring=['accuracy', 'f1_macro', 'f1_weighted', 'recall_macro', 'recall_weighted', 'precision_macro', 'precision_weighted'])
search.fit(x_train[TEXT], y_train)
logthis.say("Best parameter (CV score=%0.3f):" % search.best_score_)
logthis.say(search.best_params_)
df = pd.DataFrame(search.cv_results_)
df.to_csv('data/search/model_knn_search_'+ cat + '.csv', sep=';')
logthis.say(f'RandomForestClassifier starts for {cat=} category {ind}/{len(categories)}')
pipeline = Pipeline([
('tfidf', TfidfVectorizer(max_df=1.0, min_df=0.0, max_features=None, ngram_range=(1, 2))),
('randomforest', RandomForestClassifier(bootstrap=True,min_impurity_decrease=1e-7,n_jobs=-1, random_state=42))])
param_grid = {
'randomforest__n_estimators': [int(x) for x in np.linspace(start = 200, stop = 2000, num = 2)],
'randomforest__max_features': ['auto', 'sqrt'],
'randomforest__max_depth': [int(x) for x in np.linspace(10, 100, num = 2)]+[None],
'randomforest__min_samples_split': [2, 5, 10],
'randomforest__min_samples_leaf': [1, 2, 4],
'randomforest__bootstrap': [True, False]
}
logthis.say(pipeline.get_params().keys())
search = GridSearchCV(pipeline, param_grid, n_jobs=-1, verbose=2, error_score=-1, return_train_score=True, refit='f1_weighted', scoring=['accuracy', 'f1_macro', 'f1_weighted', 'recall_macro', 'recall_weighted', 'precision_macro', 'precision_weighted'])
search.fit(x_train[TEXT], y_train)
logthis.say("Best parameter (CV score=%0.3f):" % search.best_score_)
logthis.say(search.best_params_)
df = pd.DataFrame(search.cv_results_)
df.to_csv('data/search/model_randomforest_search_'+ cat + '.csv', sep=';')
logthis.say(f'LinearSVC starts for {cat=} category {ind}/{len(categories)}')
pipeline = Pipeline([
('tfidf', TfidfVectorizer(max_df=1.0, min_df=0.0, max_features=None, ngram_range=(1, 2))),
('linearSVC', LinearSVC() )])
param_grid = {
'linearSVC__C': [0.00001, 0.0001, 0.0005],
'linearSVC__dual': (True, False),
'linearSVC__random_state': [1]
}
logthis.say(pipeline.get_params().keys())
search = GridSearchCV(pipeline, param_grid, n_jobs=-1, verbose=2, error_score=-1, return_train_score=True, refit='f1_weighted', scoring=['accuracy', 'f1_macro', 'f1_weighted', 'recall_macro', 'recall_weighted', 'precision_macro', 'precision_weighted'])
search.fit(x_train[TEXT], y_train)
logthis.say("Best parameter (CV score=%0.3f):" % search.best_score_)
logthis.say(search.best_params_)
df = pd.DataFrame(search.cv_results_)
df.to_csv('data/search/model_linearsvc_search_'+ cat + '.csv', sep=';')
if __name__ == "__main__":
train_models('data/train_test_data/readme_new_preprocessed_train.csv', 'data/train_test_data/readme_new_preprocessed_test.csv', 'data/search/', 'search_model.csv', ["Natural Language Processing", "Computer Vision", "Sequential", "Audio", "Graphs", "Reinforcement Learning"])
|
{"/src/Experiments/best_model.py": ["/src/ResultStorage.py"], "/src/Experiments/best_sampler.py": ["/src/ResultStorage.py"]}
|
23,377,384
|
SoftwareUnderstanding/rolf
|
refs/heads/main
|
/src/Experiments/best_sampler.py
|
#from sklearn.pipeline import Pipeline
from typing import List
from sklearn.model_selection import GridSearchCV
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
import logthis
import pandas as pd
import numpy as np
from sklearn.svm import LinearSVC
from imblearn.under_sampling import RandomUnderSampler
from sklearn.linear_model import LogisticRegression
from imblearn.pipeline import make_pipeline, Pipeline
from sklearn.model_selection import cross_validate
from src.ResultStorage import ResultStorage
TEXT = "Text"
LABEL = "Label"
def filter_dataframe(df, cat):
# Vectorized relabelling: writing to the rows yielded by iterrows() is not guaranteed
# to modify the underlying frame, so assign through .loc instead.
df.loc[df[LABEL].astype(str) != cat, LABEL] = 'Other'
def get_sampling_strategy(df_train, categories: list, cat: str):
sizes = df_train.groupby(LABEL).size()
indexes = list(sizes.index)
cat_size = sizes[indexes.index(cat)]
# If the category is bigger than the sum of the other categories
other_cat_size = int(cat_size/(len(df_train[LABEL].unique())-2))+1
if cat_size > df_train.groupby(LABEL).size().sum() - cat_size:
cat_size = df_train.groupby(LABEL).size().sum() - cat_size
sampling_strategy = {}
change = 0
for c in df_train[LABEL].unique():
if c == cat:
sampling_strategy[c] = cat_size
elif c not in categories:
sampling_strategy[c] = 0
else:
if sizes[indexes.index(c)] < other_cat_size:
change = other_cat_size - sizes[indexes.index(c)]
else:
change = 0
sampling_strategy[c] = min(other_cat_size+change, sizes[indexes.index(c)])
logthis.say(f'Sampling strategy: {str(sampling_strategy)}')
return sampling_strategy
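# Illustrative trace (toy label counts, not repository data): with sizes
# {'Computer Vision': 100, 'Audio': 40, 'Graphs': 10} and cat='Computer Vision',
# other_cat_size = int(100/(3-2))+1 = 101 and cat_size is capped at 150-100 = 50,
# so the returned strategy is {'Computer Vision': 50, 'Audio': 40, 'Graphs': 10}.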
def train_models(train: str, test: str, out_folder: str = None, results_file: str = None, categories: List[str] = None, evaluation_metric: str = "test_f1-score_mean") -> None:
if categories is None:
categories = ["Natural Language Processing", "Computer Vision", "Sequential", "Audio", "Graphs", "Reinforcement Learning"]
logthis.say(f'Read files\nTrain dataset: {train} \nTest dataset: {test}')
df_train = pd.read_csv(train, sep=';')
df_train = df_train.drop(columns = 'Repo')
logthis.say('Read done')
for i, cat in enumerate(categories):
result_storage = ResultStorage(cat, evaluation_metric)
ind = i + 1
logthis.say(f'Train test split starts for {cat=} category {ind}/{len(categories)}')
x_train = df_train[TEXT].astype('U')
y_train = df_train[LABEL]
undersample = RandomUnderSampler(sampling_strategy=get_sampling_strategy(df_train, categories, cat))
#undersample = RandomUnderSampler(sampling_strategy='majority')
x_train, y_train = undersample.fit_resample(x_train.to_frame(TEXT), y_train)
logthis.say(f'Filter starts for {cat=} category {ind}/{len(categories)}')
y_train = y_train.to_frame(LABEL)
filter_dataframe(y_train, cat)
y_train = np.ravel(y_train)
logthis.say(f'Filtering done')
pipeline = Pipeline(
[
("vect", TfidfVectorizer(max_df=1.0, min_df=0.0, max_features=None, ngram_range=(1, 2))),
("clf_structured", LinearSVC(max_iter=900000)),
]
)
scores = cross_validate(pipeline, x_train[TEXT], y_train, scoring=['accuracy', 'f1_macro', 'f1_weighted', 'recall_macro', 'recall_weighted', 'precision_macro', 'precision_weighted'],
cv=5, return_train_score=True, n_jobs=-1, return_estimator=True)
print('Scores: ', scores)
df = pd.DataFrame(scores)
x_train = df_train[TEXT].astype('U')
y_train = df_train[LABEL]
#undersample = RandomUnderSampler(sampling_strategy=get_sampling_strategy(df_train, categories, cat))
undersample = RandomUnderSampler(sampling_strategy='majority')
x_train, y_train = undersample.fit_resample(x_train.to_frame(TEXT), y_train)
logthis.say(f'Filter starts for {cat=} category {ind}/{len(categories)}')
y_train = y_train.to_frame(LABEL)
filter_dataframe(y_train, cat)
y_train = np.ravel(y_train)
logthis.say(f'Filtering done')
pipeline = Pipeline(
[
("vect", TfidfVectorizer(max_df=1.0, min_df=0.0, max_features=None, ngram_range=(1, 2))),
("clf_majority", LinearSVC(max_iter=900000)),
]
)
scores = cross_validate(pipeline, x_train[TEXT], y_train, scoring=['accuracy', 'f1_macro', 'f1_weighted', 'recall_macro', 'recall_weighted', 'precision_macro', 'precision_weighted'],
cv=5, return_train_score=True, n_jobs=-1, return_estimator=True)
df1 = pd.DataFrame(scores)
df = pd.concat([df, df1])
#print(df)
df.to_csv('data/search/sampler_search_'+ cat + '.csv', sep=';')
if __name__ == "__main__":
train_models('data/readme_new_preprocessed_train.csv', 'data/readme_new_preprocessed_test.csv')
|
{"/src/Experiments/best_model.py": ["/src/ResultStorage.py"], "/src/Experiments/best_sampler.py": ["/src/ResultStorage.py"]}
|
23,377,385
|
SoftwareUnderstanding/rolf
|
refs/heads/main
|
/src/util/create_negative_samples.py
|
from pathlib import Path
import pandas as pd
readmes_path = Path('data/awesome_lists_links/readme/Other')
fieldnames = ['Label', 'Repo', 'Text']
data = {key: [] for key in fieldnames}
i = 0
for file in readmes_path.iterdir():
i += 1
with open(file) as f:
data['Text'].append('"' + ' '.join([line.replace('\n', ' ') for line in f]) + '"')
data['Repo'].append(f'repo{i}')
data['Label'].append('Other')
path = Path("data/negative_samples.csv")
df = pd.DataFrame([], columns=fieldnames)
df = pd.concat([df, pd.DataFrame(data)])
df.drop_duplicates(inplace=True)
df.to_csv(path, sep=';', index=False)
|
{"/src/Experiments/best_model.py": ["/src/ResultStorage.py"], "/src/Experiments/best_sampler.py": ["/src/ResultStorage.py"]}
|
23,377,386
|
SoftwareUnderstanding/rolf
|
refs/heads/main
|
/src/main.py
|
import json
import sys
from typing import List
import pandas as pd
import argparse
from sklearn.model_selection import train_test_split
import logthis
from preprocessing import preprocess_file
from util.utils import BASE_CATEGORIES, getCategories
from Evaluation.evaluation import Evaluator
from Evaluation.prediction import Predictor
import collectreadmes
def train_test_split_file(filename, test_size = 0.2, new_category = True):
"""
Performs train-test split on the given csv file with given ratio. Saves the files next to the input file with "_train.csv" and
"_test.csv" suffixes.
Params
---------
filename: (str) Path to the input csv file.
test_size: (float) Test dataset ratio. (default: 0.2)
new_category: (bool) If False, splits the data into repositories with only one category (train) and with multiple categories (test).
If True, splits depending on test_size parameter. (default: True)
"""
logthis.say('Train test set separation starts.')
df: pd.DataFrame = pd.read_csv(filename, sep=';')
if new_category:
train, test = train_test_split(df, test_size=test_size, random_state=42, stratify=df['Label'])
else:
train = df.drop_duplicates(subset=['Text'], keep=False)
test = df[df.duplicated(subset=['Text'], keep=False)]
train.to_csv(filename.replace('.csv', '_train.csv'), sep=';', index=False)
test.to_csv(filename.replace('.csv', '_test.csv'), sep=';', index=False)
logthis.say('Train test set separation done.')
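# Illustrative call (hypothetical path): train_test_split_file('data/readme.csv') writes
# 'data/readme_train.csv' and 'data/readme_test.csv' next to the input file.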
def merge_csv_files(files: List[str], outfile: str):
"""
Merges multiple csv files with same columns into one. Used to merge new train and test dataset into old ones.
Params
--------
files: (List[str]) List of paths of the filenames to merge.
outfile: (str) Path to the output file.
"""
logthis.say('Merging files starts.')
data: pd.DataFrame = pd.DataFrame([], columns=['Label', 'Repo', 'Text'])
num_files = len(files)
for index, file in enumerate(files):
logthis.say(f'Merging files {100*(index+1)/num_files:.2f}% {index+1}/{num_files}')
df = pd.read_csv(file, sep=';')
data = pd.concat([df, data])
logthis.say(f'Write data to {outfile}')
data.to_csv(outfile, sep=';', index=False)
logthis.say('Merging files done.')
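# Illustrative call (hypothetical paths): merge_csv_files(['data/a.csv', 'data/b.csv'], 'data/merged.csv')
# concatenates the ';'-separated inputs into a single file with the same 'Label;Repo;Text' columns.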
if __name__ == "__main__":
parser = argparse.ArgumentParser(prog="python src/main.py", description='Perform all the methods of the program.')
subparsers = parser.add_subparsers(dest='command', help="Select the command to perform.")
parser_collect_readmes = subparsers.add_parser('collect_readmes', help="Collect readme files, create dataset.",
description="Collect readmes from collected urls from given file rows.",
epilog ="""Example: python3 src/collectreadmes.py --input_mode csvfile --input data/awesome_lists_links/awesome_lists.csv
--githublinks_file data/awesome_lists_links/repos1.csv --awesome_list_mode
--readme_folder data/awesome_lists_links/readme --outfolder data/new_datasets"""
)
parser_collect_readmes.add_argument("--input_mode", required=True, choices=collectreadmes.ReadmeCollector.input_modes, help="Set input mode. The input can be given by a csvfile or a url on the command line.")
parser_collect_readmes.add_argument("--input", required=True, help="Give the input.")
parser_collect_readmes.add_argument("--category", required='url' in sys.argv, help="Set category of input url. (Required if url input_mode is used)")
parser_collect_readmes.add_argument('--awesome_list_mode', action='store_true', default=False, help='Set mode of links to awesome list.')
parser_collect_readmes.add_argument("--githublinks_file", help='Give file to save collected githubs if awesome lists are given.')
parser_collect_readmes.add_argument('--readme_folder', required=True, help='Path to the folder where readme files will be saved per category.')
parser_collect_readmes.add_argument('--outfolder', required=True, help='Path to the folder, where database per category will be saved.')
parser_collect_readmes.add_argument('--redownload', help='Redownload the readmes.', action='store_true', default=False)
parser_collect_readmes.add_argument('--input_delimiter', help='Set delimiter of input csv file (default: ";").', default=';')
parser_preprocess = subparsers.add_parser('preprocess', help="Preprocess given csv data file.")
parser_preprocess.add_argument('--preprocess_file', required=True, help='Name of .csv the file with the preprocessed data. The file will be saved in the same filename with "_preprocessed" suffix.')
parser_train_test_split = subparsers.add_parser('train_test_split', help="Makes train test split on given csv file.")
parser_train_test_split.add_argument('--train_test_file', required=True, help='Name of the file to split.')
parser_train_test_split.add_argument('--test_size', default=0.2, type=float, help='Size of the test set (default: 0.2).')
parser_merge_csv = subparsers.add_parser('merge_csv', help='Merge given csv files into one.')
parser_merge_csv.add_argument('--files', required=True, nargs="+", help='List of csv files to merge with the same header row and ";" delimiter.')
parser_merge_csv.add_argument('--outfile', required=True, help='Path to output csv file with the results.')
parser_train_models = subparsers.add_parser('train_models', help='Train the models.')
parser_train_models.add_argument('--train_set', required=True, help='Name of the csv file containing train set.')
parser_train_models.add_argument('--results_file', required=True, help='Path to the file where results will be saved.')
parser_train_models.add_argument('--out_folder', required=True, help='Path to the folder where models will be saved.')
parser_train_models.add_argument('--evaluation_metric', default='test_f1-score_mean', help='Name of the key used for evaluation (default: "test_f1-score_mean").')
parser_train_models.add_argument('--gridsearch', default='nogridsearch', choices=['nogridsearch', 'bestmodel', 'bestsampler', 'bestvectorizer', 'all'], help='Set gridsearch mode. (default: nogridsearch)')
parser_train_models_categories = parser_train_models.add_mutually_exclusive_group(required=False)
parser_train_models_categories.add_argument('--all_categories', nargs="+", help=f'List of all categories used. Use only if you want not the basic categories. {BASE_CATEGORIES=}')
parser_train_models_categories.add_argument('--additional_categories', nargs="+", help=f'List of categories adding to basic categories. {BASE_CATEGORIES=}')
parser_predict = subparsers.add_parser('predict', help='Predict with the given models.')
parser_predict.add_argument('--inputfolder', required=True, help='Path of folder with the models.')
parser_predict.add_argument('--test_set', required=True, help='Name of the csv file containing the test set.')
parser_predict.add_argument('--outfile', required=True, help='Path to outfile csv file with the results.')
parser_evaluate = subparsers.add_parser('evaluate', help='Evaluate the predictions.')
parser_evaluate.add_argument('--inputfile', required=True, help='Path of the csv file with the predictions.')
parser_evaluate.add_argument('--outfile', required=True, help='Path of the json file to write scores.')
parser_evaluate_categories = parser_evaluate.add_mutually_exclusive_group(required=False)
parser_evaluate_categories.add_argument('--all_categories', nargs="+", help=f'List of all categories used. Use only if you want not the basic categories. {BASE_CATEGORIES=}')
parser_evaluate_categories.add_argument('--additional_categories', nargs="+", help=f'List of categories adding to basic categories. {BASE_CATEGORIES=}')
args = parser.parse_args()
if args.command == 'collect_readmes':
collector = collectreadmes.ReadmeCollector()
if args.input_mode == 'csvfile':
collector.addCategoriesFromCsvFile(args.input, args.input_delimiter)
elif args.input_mode == 'url':
collector.addCategory(args.category, [args.input])
if args.awesome_list_mode:
collector.mapAwesomeListsToGithubLinks()
if args.githublinks_file:
collector.dumpGithubLinks(args.githublinks_file)
if args.redownload:
collector.downloadReadmeFiles(args.readme_folder)
collector.createDatabase(args.outfolder, args.readme_folder)
elif args.command == 'preprocess':
preprocess_file(args.preprocess_file)
elif args.command == 'train_test_split':
train_test_split_file(args.train_test_file, args.test_size)
elif args.command == 'merge_csv':
merge_csv_files(args.files, args.outfile)
if args.command == 'train_models':
categories = getCategories(BASE_CATEGORIES, args.all_categories, args.additional_categories)
logthis.say(f"{categories=}")
if args.gridsearch == 'nogridsearch':
import train
train.train_models(args.train_set, args.out_folder, args.results_file, categories, args.evaluation_metric)
elif args.gridsearch == 'bestvectorizer':
import Experiments.best_vectorizer
Experiments.best_vectorizer.train_models(args.train_set, categories)
elif args.gridsearch == 'bestsampler':
import Experiments.best_sampler
Experiments.best_sampler.train_models()
elif args.gridsearch == 'bestmodel':
import Experiments.best_model
Experiments.best_model.train_models()
elif args.gridsearch == 'all':
#TODO
pass
if args.command == 'predict':
predictor = Predictor(args.inputfolder, args.test_set, args.outfile).predict()
if args.command == 'evaluate':
categories = getCategories(BASE_CATEGORIES, args.all_categories, args.additional_categories)
logthis.say(f"{categories=}")
evaluator = Evaluator(args.inputfile, set(categories))
evaluator.evaluate()
evaluator.dumpStats(args.outfile)
|
{"/src/Experiments/best_model.py": ["/src/ResultStorage.py"], "/src/Experiments/best_sampler.py": ["/src/ResultStorage.py"]}
|
23,377,387
|
SoftwareUnderstanding/rolf
|
refs/heads/main
|
/src/Report/Report.py
|
from sklearn.metrics import make_scorer
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score,precision_recall_fscore_support
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import cross_validate
import pickle
import numpy as np
import pandas as pd
#def tn(y_true, y_pred): return confusion_matrix(y_true, y_pred)[0, 0]
#def fp(y_true, y_pred): return confusion_matrix(y_true, y_pred)[0, 1]
#def fn(y_true, y_pred): return confusion_matrix(y_true, y_pred)[1, 0]
#def tp(y_true, y_pred): return confusion_matrix(y_true, y_pred)[1, 1]
tn = lambda y_true, y_pred: confusion_matrix(y_true, y_pred)[0, 0]
fp = lambda y_true, y_pred: confusion_matrix(y_true, y_pred)[0, 1]
fn = lambda y_true, y_pred: confusion_matrix(y_true, y_pred)[1, 0]
tp = lambda y_true, y_pred: confusion_matrix(y_true, y_pred)[1, 1]
score_metrics = {'accuracy': accuracy_score,
'precision': precision_score,
'recall': recall_score,
'f1-score': f1_score,
#'p_r_f1_sup': precision_recall_fscore_support,
'tp': tp, 'tn': tn,
'fp': fp, 'fn': fn}
def report(clf, train_name, x_train, y_train, label, name='classifier', cv=5, dict_scoring=None, fit_params=None, save=False):
'''
Creates a metric report automatically with the cross_validate function.
@param clf: (model) classifier
@param train_name: (string) name of the training dataset, stored in the report
@param x_train: (list or matrix or tensor) training x data
@param y_train: (list) label data
@param label: (string) positive label used for the binary metrics
@param name: (string) name of the model (default 'classifier')
@param cv: (int) number of folds for cross-validation (default 5)
@param dict_scoring: (dict) dictionary of metric names and functions
@param fit_params: (dict) additional parameters for model fitting
@param save: (bool) whether the fitted model should be pickled to disk
@return: (pandas.DataFrame, model) dataframe containing the results of each metric
for each fold together with their means, and the fitted model
'''
if dict_scoring is None:
dict_scoring = score_metrics
score = dict_scoring.copy() # save the original dictionary
for i in score.keys():
if len(set(y_train))>2:
if i in ["precision", "recall", "f1-score"]:
score[i] = make_scorer(score[i], average = 'weighted') # make each function scorer
elif i=="roc_auc":
score[i] = make_scorer(score[i], average = 'weighted', multi_class="ovo",needs_proba=True) # make each function scorer
else:
score[i] = make_scorer(score[i]) # make each function scorer
elif i in ['precision', 'recall', 'f1-score'] :
score[i] = make_scorer(score[i], pos_label=label) # make each function scorer
else:
score[i] = make_scorer(score[i])
try:
scores = cross_validate(clf, x_train, y_train, scoring=score, cv=cv,
return_train_score=True, n_jobs=-1, fit_params=fit_params)
except:
scores = cross_validate(clf, x_train, y_train, scoring=score, cv=cv,
return_train_score=True, fit_params=fit_params)
#print(scores)
# Train test on the overall data
model = clf
model.fit(x_train, y_train)
#features = model[:-1].get_feature_names_out()
#print(f'{label}: ', file=open("output.txt", "a"))
#for i in features:
# print(f'{i}', file=open("output.txt", "a"))
#y_pred = model.predict(X_test)#>0.5).astype(int)
if save:
filename= name+label+".sav"
pickle.dump(model, open('results/models/'+filename, 'wb'))
#csvFileName = f"{label.lower().replace(' ', '_')}.csv"
#with open('results/scoreboards/' + csvFileName, 'r') as csvfile:
# rownum = len(csvfile.readlines())
# initialisation
res = {'PipelineID' : label,
'Pipeline' : name ,
'train_set' : train_name}
for i in scores: # loop on each metric generate text and values
if i == "estimator": continue
for j in enumerate(scores[i]):
res[i+"_cv"+str(j[0]+1)] = j[1]
res[i+"_mean"] = np.mean(scores[i])
# add metrics averall dataset on the dictionary
#print(scores)
#print(score)
del scores['fit_time']
del scores['score_time']
#for i in scores: # compute metrics
# scores[i] = np.append(scores[i] ,score[i.split("test_")[-1]](model, X_test, y_test))
# res[i.split("test_")[-1]+'_overall'] = scores[i][-1]
return pd.DataFrame(data=res.values(), index=res.keys()).T, model
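# Minimal usage sketch (illustrative assumption, not part of the original experiments):
# shows how a small text pipeline can be fed to report() to obtain a one-row scoreboard.
# The demo texts, labels and pipeline below are invented for illustration only.
if __name__ == "__main__":
    from sklearn.pipeline import Pipeline
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.linear_model import LogisticRegression

    demo_x = pd.Series(["vision model", "nlp transformer", "vision cnn", "nlp parser"] * 5)
    demo_y = ["Computer Vision", "Other", "Computer Vision", "Other"] * 5
    demo_clf = Pipeline([("tfidf", TfidfVectorizer()), ("lr", LogisticRegression(max_iter=1000))])
    scoreboard, fitted_model = report(demo_clf, "demo_train.csv", demo_x, demo_y,
                                      label="Computer Vision", name="tfidf_lr",
                                      dict_scoring=score_metrics)
    print(scoreboard[["PipelineID", "Pipeline", "test_accuracy_mean"]])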
|
{"/src/Experiments/best_model.py": ["/src/ResultStorage.py"], "/src/Experiments/best_sampler.py": ["/src/ResultStorage.py"]}
|
23,377,388
|
SoftwareUnderstanding/rolf
|
refs/heads/main
|
/src/Evaluation/evaluation.py
|
import argparse
import csv
import json
import os
import sys
from typing import Any, Callable, Dict, Iterable, Set
import logthis
sys.path.append(os.path.abspath(os.getcwd()) + '/src')
from util.utils import getCategories, BASE_CATEGORIES
def lower_transform(predictions: Iterable[str]) -> Set[str]:
"""
Transforms the items of the given Iterable with str.lower() function.\n
Used as a default transform function.
Params
---------
predictions: (Iterable[str]) Collection of texts to transform.
Return
---------
(Set[str]): Set of transformed items.
"""
return {pred.lower() for pred in predictions}
def csoc_transform_predictions(predictions: Iterable[str], transform_dict: Dict[str, str]) -> Set[str]:
"""
Transforms the items of the given Iterable with str.lower() function and given mapping after.\n
Used to transform predictions of CSOC.
Params
---------
predictions: (Iterable[str]) Collection of texts to transform.
transform_dict: (Dict[str]) Key-value pairs used to map CSOC predictions into known categories.
Return
---------
(Set[str]): Set of transformed items.
"""
ret = set()
logthis.say('original: '+ str(predictions))
for prediction in predictions:
pred = prediction.lower()
if pred in transform_dict:
ret.add(transform_dict[pred])
else:
ret.add(pred)
logthis.say('mapped: '+ str(ret))
return ret
class Evaluator:
"""
This class is used to evaluate the predictions and save the collected statistics.
Methods
----------
resetStats: Initializes or resets the collected stats.
evaluate: Runs the evaluation and collects stats.
dumpStats: Dumps the collected stats in JSON format.
Attributes
---------
inputfile: (str) Used to store the input file with predictions.
categories: (Iterable[str]) Collection of categories to consider when collecting stats.
transformer: (Callable[[Iterable[str]], Set[str]]) Used to store the transform function to transform predictions into categories.
stat_fields: (List[str]) Used to store the fields used in statistics.
prediction_fieldname: (str) Name of the column where the prediction values are stored in the csv file.
"""
def __init__(self,
inputfile: str,
categories: Iterable[str] = None,
transformer: Callable[[Iterable[str]], Set[str]] = lower_transform,
prediction_fieldname: str = 'Predictions'):
"""
Params
---------
inputfile: (str) Used to store the input file with predictions.
categories: (Iterable[str]) Collection of categories to consider when collecting stats.
Defaults to {'natural language processing', 'general', 'sequential', 'computer vision', 'reinforcement learning', 'graphs', 'audio'}.
transformer: (Callable[[Iterable[str]], Set[str]]) Used to store the transform function to transform predictions into categories.
Defaults to lower_transform, which performs str.lower() on predictions.
prediction_fieldname: (str) Name of the column where the prediction values are stored in the csv file.
Defaults to 'Predictions'.
"""
self.inputfile = inputfile
self.categories = set([cat.lower() for cat in categories]) if categories is not None else {'natural language processing', 'general', 'sequential', 'computer vision', 'reinforcement learning', 'graphs', 'audio'}
self.transformer = transformer
self.stat_fields = ['tp', 'tn', 'fp', 'fn', 'support']
self.resetStats()
self.prediction_fieldname = prediction_fieldname
def resetStats(self) -> None:
"""
Initializes or resets the collected stats.
"""
self.stats = {
'overall' : {key: 0 for key in self.stat_fields},
'overall_presentonly' : {key: 0 for key in self.stat_fields},
}
for category in self.categories:
self.stats[category] = {key: 0 for key in self.stat_fields}
def evaluate(self) -> Dict[str, Dict[str, Any]]:
"""
Runs the evaluation and collects stats.
Return
--------
(Dict[str, Dict[str, Any]]) Returns the collected statistics.
"""
with open(self.inputfile) as f:
self.reader = csv.DictReader(f, delimiter=';')
for row in self.reader:
self.stats['overall']['support'] += 1
predictions = self.transformer(row[self.prediction_fieldname].split(','))
labels = lower_transform(row['Labels'].split(','))
for category in self.categories:
self.stats[category]['support'] += 1
if category in predictions:
if category in labels:
self.stats['overall']['tp'] += 1
self.stats[category]['tp'] += 1
else:
self.stats['overall']['fp'] += 1
self.stats[category]['fp'] += 1
else:
if category in labels:
self.stats['overall']['fn'] += 1
self.stats[category]['fn'] += 1
else:
self.stats['overall']['tn'] += 1
self.stats[category]['tn'] += 1
for category in self.categories:
if self.stats[category]['tp'] != 0 or self.stats[category]['fp'] != 0:
for key in self.stat_fields:
self.stats['overall_presentonly'][key] += self.stats[category][key]
self.stats['overall_presentonly']['support'] += self.stats['overall']['support']
for key, val in self.stats.items():
self.stats[key]['precision'] = f"{val['tp']/(1 + val['tp'] + val['fp']):.2f}"
self.stats[key]['recall'] = f"{val['tp']/(1 + val['tp'] + val['fn']):.2f}"
self.stats[key]['sample_num'] = val['tp'] + val['fn']
return self.stats.copy()
def dumpStats(self, outfile: str):
"""
Dumps the collected stats in JSON format.
Params
---------
outfile: (str) Path to the output file.
"""
with open(outfile, 'w') as f:
json.dump(self.stats, f, indent=4)
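# Illustrative input sketch (assumed column layout, not shipped with the repository):
# the evaluator reads a ';'-separated csv whose 'Labels' and 'Predictions' cells hold
# comma-separated values, e.g.
#   Repo;Labels;Predictions
#   repo1;Computer Vision,Audio;computer vision
# Any additional columns are ignored during evaluation.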
if __name__ == '__main__':
parser_evaluate = argparse.ArgumentParser('python src/Evaluation/evaluate.py', description='Evaluate the predictions.')
parser_evaluate.add_argument('--inputfile', required=True, help='Path of the csv file with the predictions.')
parser_evaluate.add_argument('--outfile', required=True, help='Path of the json file to write scores.')
parser_evaluate_categories = parser_evaluate.add_mutually_exclusive_group(required=False)
parser_evaluate_categories.add_argument('--all_categories', nargs="+", help=f'List of all categories used. Use only if you want not the basic categories. {BASE_CATEGORIES=}')
parser_evaluate_categories.add_argument('--additional_categories', nargs="+", help=f'List of categories adding to basic categories. {BASE_CATEGORIES=}')
args = parser_evaluate.parse_args()
categories = getCategories(BASE_CATEGORIES, args.all_categories, args.additional_categories)
logthis.say(f"{categories=}")
evaluator = Evaluator(args.inputfile, set(categories))
evaluator.evaluate()
evaluator.dumpStats(args.outfile)
|
{"/src/Experiments/best_model.py": ["/src/ResultStorage.py"], "/src/Experiments/best_sampler.py": ["/src/ResultStorage.py"]}
|
23,377,389
|
SoftwareUnderstanding/rolf
|
refs/heads/main
|
/src/ResultStorage.py
|
from pathlib import Path
import pickle
import pandas as pd
class ResultStorage:
"""
This class can be used to store results of training methods. The class stores all the training results and the best model
according to the given evaluation metric.
Methods
-----------
processResult: This method is used to feed the object with the results.
dumpResults: Writes all the training results to the given file in csv format.
dumpBestModel: Saves the best model object with pickle.
"""
class BestModel:
"""
This inner class is used to store the best model so far.
Methods
----------
addModel: Used to add the new model for evaluation.
Attributes
----------
best_score: (float) Used to store the best score so far.
best_model: (object) Used to store the model with the best score so far.
best_pipeline: (str) Used to store the name of the pipeline with the best score so far.
"""
def __init__(self):
self.best_score: float = 0
self.best_model = None
self.best_pipeline = ''
def addModel(self, model, score: float, pipeline: str) -> None:
"""
Used to add the new model for evaluation.
Params
---------
model: (object) The trained model.
score: (float) The score value used for evaluation.
pipeline: (str) Name of the pipeline (as you would like to name it).
"""
if score > self.best_score:
self.best_model = model
self.best_pipeline = pipeline
self.best_score = score
def __init__(self, category: str, evaluation_metric: str = "test_f1-score_mean"):
"""
Params
----------
category: (str) Name of the category the training is running on (used in filenames).
evaluation_metric: (str) The key used to evaluate the best model from training results.
"""
self.bestModel = ResultStorage.BestModel()
self.category = category
self.evaluation_metric = evaluation_metric
self.df_results = pd.DataFrame()
def processResult(self, results: pd.DataFrame, model) -> None:
"""
This method is used to feed the object with the results.
Params
----------
results: (pandas.DataFrame) Result data of the training.
model: (object) The result model of the training.
"""
self.bestModel.addModel(model, results[self.evaluation_metric].values[0], results['Pipeline'].values[0])
self.df_results = pd.concat([self.df_results, results])
def dumpResults(self, filename: str) -> None:
"""
Writes all the training results to the given file in csv format.
Params:
---------
filename: (str) Path to the file where the data will be dumped.
"""
Path(filename).parent.mkdir(parents=True, exist_ok=True)
self.df_results.to_csv(filename, mode='a', sep=';', index=False)
def dumpBestModel(self, folder_name: str) -> None:
"""
Saves the best model object with pickle.
Params:
---------
filename: (str) Path to the folder where the object will be saved.
"""
Path(folder_name).mkdir(parents=True, exist_ok=True)
with open(f'{folder_name}/{self.category.replace(" ", "_").lower()}_{self.bestModel.best_pipeline}.sav', 'wb') as f:
pickle.dump(self.bestModel.best_model, f)
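# Minimal usage sketch (illustrative only; the column names and paths below are
# assumptions chosen to match the defaults documented above, not repository code).
if __name__ == "__main__":
    demo_results = pd.DataFrame([{
        "PipelineID": "Computer Vision",
        "Pipeline": "tfidf_linearsvc",
        "test_f1-score_mean": 0.9,
    }])
    storage = ResultStorage("Computer Vision")
    storage.processResult(demo_results, model={"placeholder": "any picklable object"})
    storage.dumpResults("results/demo_scoreboard.csv")
    storage.dumpBestModel("results/demo_models")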
|
{"/src/Experiments/best_model.py": ["/src/ResultStorage.py"], "/src/Experiments/best_sampler.py": ["/src/ResultStorage.py"]}
|
23,377,390
|
SoftwareUnderstanding/rolf
|
refs/heads/main
|
/src/collectreadmes.py
|
import argparse
from collections import defaultdict
from pathlib import Path
import sys
from typing import Iterable, List, Set, Union
import logthis
import requests
import re
import csv
from concurrent.futures import ThreadPoolExecutor
import pandas as pd
REGEX_LINK = r"(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'\".,<>?«»“”‘’]))"
LOG = logthis.say
class ReadmeCollector:
"""
This class can be used to create a database for training by collecting readme texts of github repositories.
The class can also handle awesome list links and collect all the github repositories found in their readme text.
The output contains all the readme texts in separate files for each repository and the created database.
If awesome lists are provided, the collected github urls are saved as well.
"""
input_modes = ['csvfile', 'url']
def __init__(self):
self.__categories: defaultdict[str, List[str]] = defaultdict(list)
def __getReadmeUrlFromGithubUrl(self, github_url: str) -> str:
"""
Generates the url of readme file for the given github url.
Params
----------
github_url: (str) The github url.
Return
----------
(str) The url of the readme file.
"""
return 'https://raw.githubusercontent.com/' + '/'.join(github_url.split('/')[-2:]) + '/master/README.md'
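# Illustrative mapping (example url only): 'https://github.com/pytorch/pytorch' ->
# 'https://raw.githubusercontent.com/pytorch/pytorch/master/README.md'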
def __getReadmeText(self, github_url: str) -> Union[str, None]:
"""
Gets the text for the given github repo or None if not found by the method.
Tries 'README.md' and 'readme.md' files in root folder of repo.
Params
-----------
github_url: (str) The github url.
Return
----------
(str) The text of the readme or None if not found.
"""
readme_url = self.__getReadmeUrlFromGithubUrl(github_url)
r = requests.get(readme_url)
res = None
if r.status_code == 200:
res = r.text
else:
r = requests.get(readme_url.replace('README', 'readme'))
if r.status_code == 200:
res = r.text
return res
def __downloadReadmeTextToFile(self, github_url: str, folder: Path) -> None:
"""
Downloads the readme file of the given repo and writes in the given folder into '{repo_name}.txt' file.
Params
---------
github_url: (str) The github url.
folder: (str) Path to the folder where readme text will be saved.
"""
text = self.__getReadmeText(github_url)
if text is not None:
with open(folder / (self.getRepoNameFromGitHubUrl(github_url) + '.txt'), 'w') as f:
f.write(text)
LOG(f"Downloaded: {github_url=} to {folder=}")
def getRepoNameFromGitHubUrl(self, url: str) -> str:
"""
Generates a repository name from the given github url.
Params
--------
url: (str) The url to generate the name from.
Return
--------
(str) The generated repository name.
"""
return '_'.join(url.split('/')[-2:])
def addCategory(self, category: str, links: Iterable[str]) -> None:
"""
Function to add a single category's links to the categories Dict.
Params
--------
category: (str) Name of the category
links: (iterable[str]) Links to add to category
"""
LOG(f"Adding {category=} with given github_urls, num: {len(links)}")
self.__categories[category.replace('/', '_')].extend(links)
def addCategoriesFromCsvFile(self, path: str, delimiter: str = ';') -> None:
"""
Function to add every category and its links to the categories Dict.
At this step the links could be awesome list links or github links.
Params
---------
path: (str) Path to the csv file containing the data with 'Label', 'Repo' headers
Examples: 'data/awesome_lists_links/awesome_lists.csv', 'data/awesome_lists_links/repos.csv'.
delimiter: (str) Delimiter of the csv file.
"""
LOG("Adding categories from csvfile.")
with open(path) as f:
reader = csv.DictReader(f, delimiter=delimiter)
for row in reader:
self.__categories[row['Label'].replace('/', '_')].append(row['Repo'])
LOG(f"Added categories: {list(self.__categories.keys())}")
def mapAwesomeListsToGithubLinks(self) -> None:
"""
If awesome list(s) were given in the input, then we have to map those to github links (the mapping is done in place).
"""
LOG("Mapping awesome lists to github urls.")
for category in self.__categories.keys():
LOG(f"Mapping {category=}")
with ThreadPoolExecutor() as executor:
res = executor.map(self.__collectGithubUrlsFromAwesomeList, self.__categories[category])
links = set()
for r in res:
links = links.union(r)
self.__categories[category] = list(links)
def __collectGithubUrlsFromAwesomeList(self, input_url: str) -> Set[str]:
"""
Collects github urls from the given awesome list link (or any link).
Params
----------
input_url: (str) The url where the github urls will be collected from.
Return
----------
(Set[str]) Set of the github urls collected.
"""
github_urls = set()
text = self.__getReadmeText(input_url)
if text is not None:
for found in re.finditer(REGEX_LINK, text):
if 'github.com' in found[0]:
github_urls.add(found[0])
return github_urls
def downloadReadmeFiles(self, readmes_folder: str) -> None:
"""
Downloads all the readme files per category. Creates a folder for each category in the given folder.
Params
---------
readmes_folder: (str) Path to the folder where the readmes will be saved.
Example: 'data/awesome_lists_links/readme'
"""
LOG(f"Downloading readme files to {readmes_folder=}")
for category, github_links in self.__categories.items():
readmes_path = Path(readmes_folder) / category
readmes_path.mkdir(parents=True, exist_ok=True)
LOG(f"Downloading readme files for {category=} to '{readmes_path.as_posix()}'")
with ThreadPoolExecutor() as executor:
executor.map(lambda x: self.__downloadReadmeTextToFile(x, readmes_path), github_links)
def dumpGithubLinks(self, outfile: str) -> None:
"""
Dumps (in append mode) github links into a csvfile. Fields: ('Label': category, 'Repo': github url).
Params
---------
outfile: (str) Path to the file where links will be dumped.
Example: 'data/awesome_lists_links/repos.csv'.
"""
LOG(f"Writing github links to '{outfile}'")
path = Path(outfile)
path.parent.mkdir(parents=True, exist_ok=True)
fieldnames = ['Label', 'Repo']
data = {key: [] for key in fieldnames}
for category, links in self.__categories.items():
for link in links:
data['Label'].append(category)
data['Repo'].append(link)
if path.exists():
df = pd.read_csv(path, sep=';')
else:
df = pd.DataFrame([], columns=fieldnames)
df = pd.concat([df, pd.DataFrame(data)])
df.drop_duplicates(inplace=True)
df.to_csv(path, sep=';', index=False)
def createDatabase(self, outfolder: str, readmes_folder: str) -> None:
"""
Looks for the readme files containing readme text linked to the github urls stored in __categories.
Generates a database per category from the readme texts ready for preprocessing or training.
If database exists, only appends the new readme texts.
Params
---------
outfolder: (str) The folder where the databases are stored per category folders.
Example: 'data/new datasets'
readmes_folder: (str) Path to the folder where readmes are stored in separate folders per category.
Example: 'data/awesome_lists_links/readme'
"""
LOG(f"Generating database to '{outfolder}'")
readmes_folder_path = Path(readmes_folder)
fieldnames = ['Label', 'Repo', 'Text']
for category, links in self.__categories.items():
path = Path(f"{outfolder}/{category}/readme_{'_'.join(category.lower().split())}.csv")
path.parent.mkdir(parents=True, exist_ok=True)
LOG(f"Generating database for {category=} to '{path.as_posix()}'")
data = {key: [] for key in fieldnames}
for url in links:
readme_path = Path(readmes_folder_path / category / (self.getRepoNameFromGitHubUrl(url) + '.txt'))
if readme_path.exists():
with open(readme_path) as f:
data['Text'].append('"' + ' '.join([line.replace('\n', ' ') for line in f]) + '"')
data['Repo'].append(url)
data['Label'].append(category)
if path.exists():
df = pd.read_csv(path, sep=';')
else:
df = pd.DataFrame([], columns=fieldnames)
df = pd.concat([df, pd.DataFrame(data)])
df.drop_duplicates(inplace=True)
df.to_csv(path, sep=';', index=False)
def clear(self) -> None:
"""
Resets the class by clearing its data.
"""
self.__categories.clear()
if __name__ == "__main__":
parser = argparse.ArgumentParser("python src/collect_readmes.py",
description="Collect readmes from collected urls from given file rows.",
epilog ="""Example: python3 src/collectreadmes.py --input_mode csvfile --input data/awesome_lists_links/awesome_lists.csv
--githublinks_file data/awesome_lists_links/repos1.csv --awesome_list_mode
--readme_folder data/awesome_lists_links/readme --outfolder data/new_datasets""")
parser.add_argument("--input_mode", required=True, choices=ReadmeCollector.input_modes, help="Set input mode. The input can be given by a csvfile or a url on the command line.")
parser.add_argument("--input", required=True, help="Give the input.")
parser.add_argument("--category", required='url' in sys.argv, help="Set category of input url. (Required if url input_mode is used)")
parser.add_argument('--awesome_list_mode', action=argparse.BooleanOptionalAction, default=False, help='Set mode of links to awesome list.')
parser.add_argument("--githublinks_file", help='Give file to save collected githubs if awesome lists are given.')
parser.add_argument('--readme_folder', required=True, help='Path to the folder where readme files will be saved per category.')
parser.add_argument('--outfolder', required=True, help='Path to the folder, where database per category will be saved.')
parser.add_argument('--redownload', help='Redownload the readmes.', action=argparse.BooleanOptionalAction, default=False)
parser.add_argument('--input_delimiter', help='Set delimiter of input csv file (default: ";").', default=';')
args = parser.parse_args()
collector = ReadmeCollector()
if args.input_mode == 'csvfile':
collector.addCategoriesFromCsvFile(args.input, args.input_delimiter)
elif args.input_mode == 'url':
collector.addCategory(args.category, [args.input])
if args.awesome_list_mode:
collector.mapAwesomeListsToGithubLinks()
if args.githublinks_file:
collector.dumpGithubLinks(args.githublinks_file)
if args.redownload:
collector.downloadReadmeFiles(args.readme_folder)
collector.createDatabase(args.outfolder, args.readme_folder)
|
{"/src/Experiments/best_model.py": ["/src/ResultStorage.py"], "/src/Experiments/best_sampler.py": ["/src/ResultStorage.py"]}
|
23,377,391
|
SoftwareUnderstanding/rolf
|
refs/heads/main
|
/src/bert/train_bert.py
|
import numpy as np
import pandas as pd
from typing import List, Dict
import os
os.system("wget --quiet https://raw.githubusercontent.com/tensorflow/models/master/official/nlp/bert/tokenization.py")
import tensorflow as tf
import tensorflow_hub as hub
from keras.utils import to_categorical
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt
os.system("pip3 install bert-tensorflow==1.0.1")
from bert.tokenization import bert_tokenization as tokenization
import logthis
import sys
from absl import flags
from sklearn.utils import shuffle as sh
from imblearn.under_sampling import RandomUnderSampler
import pickle
import seaborn as sns
logthis.say(f"Version: {tf.__version__}")
logthis.say(f"Eager mode: {tf.executing_eagerly()}")
logthis.say(f"Hub version: {hub.__version__}")
gpu = "available" if tf.config.list_physical_devices("GPU") else "NOT AVAILABLE"
logthis.say(f"GPU is {gpu}")
def get_sampling_strategy(df_train: pd.DataFrame, categories: List[str], cat: str) -> Dict[str, int]:
df =df_train.drop(df_train[df_train['Label'] == 'General'].index)
sizes = df.groupby('Label').size()
indexes = list(sizes.index)
cat_size = sizes[indexes.index(cat)]
# If the category is bigger than the sum of the other categories
#print(df[df[LABEL] != 'Other'].groupby(LABEL).size().sum())
if cat_size > df.groupby('Label').size().sum() - cat_size:
cat_size = df.groupby('Label').size().sum() - cat_size
#other_cat_size = int(cat_size/(len(categories)+1))+1
other_cat_size = cat_size
sampling_strategy = {}
change = 0
for c in categories:
if c == cat:
sampling_strategy[c] = cat_size
elif c not in categories+['Other']:
sampling_strategy[c] = 0
else:
s = other_cat_size
sampling_strategy[c] = min(s, sizes[indexes.index(c)])
change = 0
if sampling_strategy[c] < s:
change += s - sampling_strategy[c]
if sizes[indexes.index(c)] < other_cat_size:
change += other_cat_size - sizes[indexes.index(c)]
else:
change += 0
logthis.say(f'Sampling strategy: {str(sampling_strategy)}',)
return sampling_strategy
def load_data():
train_data = pd.read_csv('data/train_test_data/readme_base_semantic_web_preprocessed_train.csv',sep=';')
train_data = train_data.drop(columns = 'Repo')
train_data = train_data.drop(train_data[train_data.Label == 'General'].index)
X = train_data[['Text']]
y = train_data[['Label']]
cats = list(train_data['Label'].unique())
sampling_strategy = get_sampling_strategy(train_data, cats, 'Natural Language Processing')
rus = RandomUnderSampler(random_state=42, sampling_strategy=sampling_strategy)
X, y = rus.fit_resample(X, y)
train_data = pd.concat([X, y], axis=1)
train_data = sh(train_data)
print(train_data[['Label']].value_counts())
test_data = pd.read_csv('data/train_test_data/readme_base_semantic_web_preprocessed_test.csv',sep=';')
test_data = test_data.drop(columns = 'Repo')
test_data = test_data.drop(test_data[test_data.Label == 'General'].index)
return train_data, test_data
def encoding_the_labels(train_data):
label = preprocessing.LabelEncoder()
y = label.fit_transform(train_data['Label'])
print('0, 1, 2, 3, 4, 5: ', label.inverse_transform([0, 1, 2, 3, 4, 5]))
y = to_categorical(y)
return label, y
def bert_encode(texts, tokenizer, max_len=512):
all_tokens = []
all_masks = []
all_segments = []
for text in texts:
text = tokenizer.tokenize(text)
text = text[:max_len-2]
input_sequence = ["[CLS]"] + text + ["[SEP]"]
pad_len = max_len-len(input_sequence)
tokens = tokenizer.convert_tokens_to_ids(input_sequence) + [0] * pad_len
pad_masks = [1] * len(input_sequence) + [0] * pad_len
segment_ids = [0] * max_len
all_tokens.append(tokens)
all_masks.append(pad_masks)
all_segments.append(segment_ids)
return np.array(all_tokens), np.array(all_masks), np.array(all_segments)
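# All three returned arrays (token ids, attention masks, segment ids) have shape
# (len(texts), max_len); illustrative call: bert_encode(["a readme text"], tokenizer, max_len=300).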
def build_model(bert_layer, max_len=512):
input_word_ids = tf.keras.Input(shape=(max_len,), dtype=tf.int32, name="input_word_ids")
input_mask = tf.keras.Input(shape=(max_len,), dtype=tf.int32, name="input_mask")
segment_ids = tf.keras.Input(shape=(max_len,), dtype=tf.int32, name="segment_ids")
pooled_output, sequence_output = bert_layer([input_word_ids, input_mask, segment_ids])
clf_output = sequence_output[:, 0, :]
lay = tf.keras.layers.Dense(64, activation='relu')(clf_output)
lay = tf.keras.layers.Dropout(0.3)(lay)
lay = tf.keras.layers.Dense(16, activation='relu')(lay)
lay = tf.keras.layers.Dropout(0.3)(lay)
out = tf.keras.layers.Dense(7, activation='softmax')(lay)
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate=2e-5,
decay_steps=10000,
decay_rate=0.5)
model = tf.keras.models.Model(inputs=[input_word_ids, input_mask, segment_ids], outputs=out)
#model.compile(tf.keras.optimizers.Adam(lr=2e-5), loss='categorical_crossentropy', metrics=['accuracy'])
model.compile(tf.keras.optimizers.Adam(learning_rate=lr_schedule), loss='categorical_crossentropy', metrics=['accuracy'])
return model
def plot_confusion_matrix(X_test, y_test, model):
y_pred = model.predict(X_test)
y_pred = [np.argmax(i) for i in model.predict(X_test)]
con_mat = tf.math.confusion_matrix(labels=y_test, predictions=y_pred).numpy()
con_mat_norm = np.around(con_mat.astype('float') / con_mat.sum(axis=1)[:, np.newaxis], decimals=2)
label_names = list(range(len(con_mat_norm)))
con_mat_df = pd.DataFrame(con_mat_norm,
index=label_names,
columns=label_names)
figure = plt.figure(figsize=(10, 10))
sns.heatmap(con_mat_df, cmap=plt.cm.Blues, annot=True)
plt.ylabel('True label')
plt.xlabel('Predicted label')
if __name__ == "__main__":
sys.argv=['preserve_unused_tokens=False']
flags.FLAGS(sys.argv)
max_len = 300
train_data, test_data = load_data()
label, y = encoding_the_labels(train_data)
#label = preprocessing.LabelEncoder()
#y = label.fit_transform(train_data['Label'])
#y = to_categorical(y)
m_url = 'https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/2'
# m_url = 'https://tfhub.dev/google/small_bert/bert_uncased_L-2_H-512_A-8/2'
bert_layer = hub.KerasLayer(m_url, trainable=True)
tf.gfile = tf.io.gfile
vocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy()
do_lower_case = bert_layer.resolved_object.do_lower_case.numpy()
tokenizer = tokenization.FullTokenizer(vocab_file, do_lower_case)
train_input = bert_encode(train_data.Text.values, tokenizer, max_len=max_len)
test_input = bert_encode(test_data.Text.values, tokenizer, max_len=max_len)
train_labels = y
labels = label.classes_
logthis.say(labels)
model = build_model(bert_layer, max_len=max_len)
logthis.say(model.summary())
checkpoint = tf.keras.callbacks.ModelCheckpoint('/home/u951/u951196/rolf/data/model_1002/model_bert.h5', monitor='val_accuracy', save_best_only=True, verbose=1)
earlystopping = tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=5, verbose=1)
history = model.fit(
train_input, train_labels,
validation_split=0.2,
epochs=200,
callbacks=[checkpoint, earlystopping],
batch_size=8,
verbose=1
)
with open('/home/u951/u951196/rolf/data/model_1002/history.json', 'wb') as file_pi:
pickle.dump(history.history, file_pi)
logthis.say(history.history.keys())
# summarize history for accuracy
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.savefig('/home/u951/u951196/rolf/data/model_1002/bert_accuracy.png')
plt.clf()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.savefig('/home/u951/u951196/rolf/data/model_1002/bert_loss.png')
|
{"/src/Experiments/best_model.py": ["/src/ResultStorage.py"], "/src/Experiments/best_sampler.py": ["/src/ResultStorage.py"]}
|
23,377,392
|
SoftwareUnderstanding/rolf
|
refs/heads/main
|
/src/Experiments/best_vectorizer.py
|
from typing import List
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
import logthis
import pandas as pd
import numpy as np
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from imblearn.under_sampling import RandomUnderSampler
from sklearn.metrics import confusion_matrix
#from src.ResultStorage import ResultStorage
TEXT = "Text"
LABEL = "Label"
tn = lambda y_true, y_pred: confusion_matrix(y_true, y_pred)[0, 0]
fp = lambda y_true, y_pred: confusion_matrix(y_true, y_pred)[0, 1]
fn = lambda y_true, y_pred: confusion_matrix(y_true, y_pred)[1, 0]
tp = lambda y_true, y_pred: confusion_matrix(y_true, y_pred)[1, 1]
def filter_dataframe(df, cat):
# Vectorized relabelling: writing to the rows yielded by iterrows() is not guaranteed
# to modify the underlying frame, so assign through .loc instead.
df.loc[df[LABEL].astype(str) != cat, LABEL] = 'Other'
def train_models(train: str, categories: List[str] = None) -> None:
if categories is None:
categories = ["Natural Language Processing", "Computer Vision", "Sequential", "Audio", "Graphs", "Reinforcement Learning"]
logthis.say(f'Read files\nTrain dataset: {train} \n')
df_train = pd.read_csv(train, sep=';')
#df_test.drop_duplicates(subset=['Text'], inplace=True, keep='last')
df_train = df_train.drop(columns = 'Repo')
logthis.say('Read done')
for i, cat in enumerate(categories):
#result_storage = ResultStorage(train, cat, evaluation_metric)
#undersample = RandomUnderSampler(sampling_strategy=get_sampling_strategy(df_train, categories, cat))
#undersample = RandomUnderSampler(sampling_strategy='majority')
#x_train, y_train = undersample.fit_resample(x_train.to_frame(TEXT), y_train)
ind = i + 1
logthis.say(f'Train test split starts for {cat=} category {ind}/{len(categories)}')
x_train = df_train[TEXT].astype('U')
y_train = df_train[LABEL]
logthis.say(f'Filter starts for {cat=} category {ind}/{len(categories)}')
y_train = y_train.to_frame(LABEL)
filter_dataframe(y_train, cat)
y_train = np.ravel(y_train)
logthis.say(f'Filtering done')
undersample = RandomUnderSampler(sampling_strategy='majority')
x_train, y_train = undersample.fit_resample(x_train.to_frame(TEXT), y_train)
#countvect = CountVectorizer(analyzer="word",token_pattern=r'\w{1,}', max_features=10000, lowercase=True)
#tfidf = TfidfVectorizer(analyzer="word",token_pattern=r'\w{1,}', max_features=10000, lowercase=True)
pipeline = Pipeline(
[
("vect", TfidfVectorizer()),
("clf", LinearSVC(max_iter=900000)),
]
)
parameters = {
"vect__max_df": (1.0, 0.9, 0.8),
#"vect__max_df": [1.0],
"vect__min_df": (0.1, 0.15, 0.2),
#"vect__min_df": [0.0],
'vect__max_features': (None, 1500, 2000, 2500, 3000, 5000, 10000),
#'vect__max_features' : (1500, 2000)
"vect__ngram_range": ((1, 1), (1, 2)), # unigrams or bigrams
}
search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=2, error_score=-1, return_train_score=True, refit='f1_weighted', scoring=['accuracy', 'f1_macro', 'f1_weighted', 'recall_macro', 'recall_weighted', 'precision_macro', 'precision_weighted'])
search.fit(x_train[TEXT], y_train)
print("Best parameter (CV score=%0.3f):" % search.best_score_)
print(search.best_params_)
df = pd.DataFrame(search.cv_results_)
df.to_csv('data/search/vectorizer_search_tfidf_majority_negative_samples'+ '_'.join(cat.lower().split(' ')) + '.csv', sep=';')
if __name__ == "__main__":
train_models('data/train_test_data/readme_negative_samples_preprocessed_train.csv')
|
{"/src/Experiments/best_model.py": ["/src/ResultStorage.py"], "/src/Experiments/best_sampler.py": ["/src/ResultStorage.py"]}
|
23,377,393
|
SoftwareUnderstanding/rolf
|
refs/heads/main
|
/src/util/addNewClass.py
|
import pandas as pd
import argparse
from pathlib import Path
parser = argparse.ArgumentParser()
parser.add_argument('--newdata', dest='newdata', required=True, help='Define the destination of the new class in csv format.')
parser.add_argument('--data', dest='data', required=True, help='Define the destination of the existing classes in csv format.')
args = parser.parse_args()
newdata_path = Path(args.newdata)
if not newdata_path.is_file():
raise FileNotFoundError('File does not exist: ' + args.newdata)
data_path = Path(args.data)
if not data_path.is_file():
raise FileNotFoundError('File does not exist: ' + args.data)
df_new = pd.read_csv(newdata_path, sep=';')
df = pd.read_csv(data_path, sep=';')
df_concat = pd.concat([df, df_new])
df_concat.to_csv(data_path, sep=';', index = False)
print('Success!')
|
{"/src/Experiments/best_model.py": ["/src/ResultStorage.py"], "/src/Experiments/best_sampler.py": ["/src/ResultStorage.py"]}
|
23,377,394
|
SoftwareUnderstanding/rolf
|
refs/heads/main
|
/src/util/process_results.py
|
import csv
import sys
import json
from pathlib import Path
path = Path(sys.argv[1])
reader = csv.DictReader(open(path), delimiter=';')
res = {}
for row in reader:
if (row['train_set'], row['validation_set']) not in res:
res[(row['train_set'], row['validation_set'])] = {}
if row['PipelineID'] not in res[(row['train_set'], row['validation_set'])]:
res[(row['train_set'], row['validation_set'])][row['PipelineID']] = row
elif float(res[(row['train_set'], row['validation_set'])][row['PipelineID']]['test_f1-score_mean']) < float(row['test_f1-score_mean']):
res[(row['train_set'], row['validation_set'])][row['PipelineID']] = row
res[(row['train_set'], row['validation_set'])][row['PipelineID']]['preprocessing'] = 'Preprocessed'
for (train_set, validation_set), val in res.items():
writer = csv.DictWriter(open(f'results/final_results/{train_set[5:-4]}_{validation_set[5:]}', 'a+'), delimiter=';', fieldnames=reader.fieldnames+['preprocessing'])
writer.writeheader()
vals = (v for v in val.values())
writer.writerows(vals)
#for key, val in res.items():
# print(key)
# print(json.dumps(val, indent=4))
|
{"/src/Experiments/best_model.py": ["/src/ResultStorage.py"], "/src/Experiments/best_sampler.py": ["/src/ResultStorage.py"]}
|
23,377,395
|
SoftwareUnderstanding/rolf
|
refs/heads/main
|
/src/Vectorizing/TF_IDF_Vectorizer.py
|
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd
def getWordLevelVectorizer(df : pd.DataFrame, textcolname : str) -> TfidfVectorizer:
# word level tf-idf
tfidf_vect = TfidfVectorizer(analyzer='word', token_pattern=r'\w{1,}', max_features=10000)
tfidf_vect.fit(df[textcolname])
print("word level tf-idf done")
return tfidf_vect
def getNGramLevelVectorizer(df : pd.DataFrame, textcolname : str) -> TfidfVectorizer:
# ngram level tf-idf
tfidf_vect_ngram = TfidfVectorizer(analyzer='word', ngram_range=(1,3), max_features=10000)#, min_df=150, max_df=2000)
tfidf_vect_ngram.fit(df[textcolname])
print("ngram level tf-idf done")
return tfidf_vect_ngram
def getCharLevelVectorizer(df : pd.DataFrame, textcolname : str) -> TfidfVectorizer:
# characters level tf-idf
tfidf_vect_ngram_chars = TfidfVectorizer(analyzer='char', ngram_range=(2,3), max_features=10000) #token_pattern=r'\w{1,}',
tfidf_vect_ngram_chars.fit(df[textcolname])
print("characters level tf-idf done")
return tfidf_vect_ngram_chars
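# Usage sketch (the column name 'text' is only an example):
#   vect = getNGramLevelVectorizer(df, 'text')
#   features = vect.transform(df['text'])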
|
{"/src/Experiments/best_model.py": ["/src/ResultStorage.py"], "/src/Experiments/best_sampler.py": ["/src/ResultStorage.py"]}
|
23,435,844
|
DarianHole/irida-uploader
|
refs/heads/development
|
/iridauploader/tests/parsers/directory/test_sample_parser.py
|
import unittest
from collections import OrderedDict
from os import path
from unittest.mock import patch
import iridauploader.parsers as parsers
import iridauploader.parsers.directory.sample_parser as sample_parser
from iridauploader.parsers.exceptions import SampleSheetError
import iridauploader.model as model
path_to_module = path.abspath(path.dirname(__file__))
if len(path_to_module) == 0:
path_to_module = '.'
class TestParseSampleList(unittest.TestCase):
"""
test parsing the list of samples from a sample sheet
"""
def setUp(self):
print("\nStarting " + self.__module__ + ": " + self._testMethodName)
def test_valid(self):
"""
Given a valid sample sheet, parse correctly
:return:
"""
sheet_file = path.join(path_to_module, "fake_dir_data",
"SampleList_simple.csv")
file_path_1 = path.join(path_to_module,
"fake_dir_data", "file_1.fastq.gz")
file_path_2 = path.join(path_to_module,
"fake_dir_data", "file_2.fastq.gz")
run_data_directory_file_list = ["file_1.fastq.gz", "file_2.fastq.gz"]
res = sample_parser.parse_sample_list(sheet_file, run_data_directory_file_list)
# Check we have 1 sample
self.assertEqual(len(res), 1)
# Check if data is correct
self.assertEqual(res[0].sample_name, "my-sample-1")
self.assertEqual(res[0].get_uploadable_dict()["sample_project"], "75")
self.assertEqual(res[0].get_uploadable_dict()["File_Forward"], "file_1.fastq.gz")
self.assertEqual(res[0].get_uploadable_dict()["File_Reverse"], "file_2.fastq.gz")
@patch("iridauploader.parsers.directory.sample_parser._parse_samples")
def test_valid_full_file_path(self, mock_parse_samples):
"""
Given a valid sample sheet with full file paths, parse correctly
:return:
"""
sheet_file = path.join(path_to_module, "fake_dir_data",
"SampleList_simple.csv")
file_path_1 = path.join(path_to_module,
"fake_dir_data", "file_1.fastq.gz")
file_path_2 = path.join(path_to_module,
"fake_dir_data", "file_2.fastq.gz")
sample_list = [
model.Sample(
sample_name='my-sample-1',
description="",
sample_number=0,
samp_dict={
('sample_project', '75'),
('File_Forward', path.abspath(file_path_1)),
('File_Reverse', path.abspath(file_path_2))
}
)
]
mock_parse_samples.return_value = sample_list
run_data_directory_file_list = ["file_1.fastq.gz", "file_2.fastq.gz"]
res = sample_parser.parse_sample_list(sheet_file, run_data_directory_file_list)
mock_parse_samples.assert_called_with(sheet_file)
# Check we have 1 sample
self.assertEqual(len(res), 1)
# Check if data is correct
self.assertEqual(res[0].sample_name, "my-sample-1")
self.assertEqual(res[0].get_uploadable_dict()["sample_project"], "75")
self.assertEqual(res[0].get_uploadable_dict()["File_Forward"], path.abspath(file_path_1))
self.assertEqual(res[0].get_uploadable_dict()["File_Reverse"], path.abspath(file_path_2))
def test_no_forward_read(self):
"""
No Valid files were found with names given in sample sheet
:return:
"""
directory = path.join(path_to_module, "fake_dir_data")
file_path = path.join(directory, "list_no_forward.csv")
with self.assertRaises(SampleSheetError):
res = sample_parser.parse_sample_list(file_path, directory)
def test_no_reverse_read(self):
"""
The file list in the sample sheet is invalid
:return:
"""
directory = path.join(path_to_module, "fake_dir_data")
file_path = path.join(directory, "list_no_reverse.csv")
with self.assertRaises(SampleSheetError):
res = sample_parser.parse_sample_list(file_path, directory)
def test_mixed_paired_and_single_reads(self):
directory = path.join(path_to_module, "fake_dir_data")
file_path = path.join(directory, "list_mixed.csv")
with self.assertRaises(SampleSheetError):
res = sample_parser.parse_sample_list(file_path, directory)
class TestParseSamples(unittest.TestCase):
"""
Test validity or invalidity of parsed samples
"""
def setUp(self):
print("\nStarting " + self.__module__ + ": " + self._testMethodName)
def test_valid(self):
"""
Given a valid sample sheet, parse correctly
:return:
"""
sheet_file = path.join(path_to_module, "fake_dir_data",
"SampleList_simple.csv")
file_name_1 = "file_1.fastq.gz"
file_name_2 = "file_2.fastq.gz"
res = sample_parser._parse_samples(sheet_file)
# Check we have 1 sample
self.assertEqual(len(res), 1)
# Check if data is correct
self.assertEqual(res[0].sample_name, "my-sample-1")
self.assertEqual(res[0].get_uploadable_dict()["sample_project"], "75")
self.assertEqual(res[0].get_uploadable_dict()["File_Forward"], file_name_1)
self.assertEqual(res[0].get_uploadable_dict()["File_Reverse"], file_name_2)
def test_no_forward_read(self):
"""
No Valid files were found with names given in sample sheet
:return:
"""
directory = path.join(path_to_module, "fake_dir_data")
file_path = path.join(directory, "no_forward.csv")
with self.assertRaises(SampleSheetError):
res = sample_parser._parse_samples(file_path)
def test_no_reverse_read(self):
"""
The file list in the sample sheet is invalid
:return:
"""
directory = path.join(path_to_module, "fake_dir_data")
file_path = path.join(directory, "no_reverse.csv")
res = sample_parser._parse_samples(file_path)
# This should have an empty file reverse
self.assertEqual(res[0]["File_Reverse"], "")
def test_no_reverse_read_with_comma(self):
"""
The file list in the sample sheet is invalid
:return:
"""
directory = path.join(path_to_module, "fake_dir_data")
file_path = path.join(directory, "no_reverse_with_comma.csv")
res = sample_parser._parse_samples(file_path)
# This should have an empty file reverse
self.assertEqual(res[0]["File_Reverse"], "")
def test_no_read_files_in_list(self):
"""
The file list in the sample sheet is invalid
:return:
"""
directory = path.join(path_to_module, "fake_dir_data")
file_path = path.join(directory, "no_read_files.csv")
with self.assertRaises(SampleSheetError):
res = sample_parser._parse_samples(file_path)
|
{"/iridauploader/tests/parsers/directory/test_sample_parser.py": ["/iridauploader/parsers/__init__.py", "/iridauploader/model/__init__.py"], "/iridauploader/parsers/__init__.py": ["/iridauploader/parsers/parsers.py"], "/iridauploader/parsers/parsers.py": ["/iridauploader/parsers/__init__.py"]}
|
23,435,845
|
DarianHole/irida-uploader
|
refs/heads/development
|
/iridauploader/parsers/__init__.py
|
from iridauploader.parsers.parsers import Parser
from iridauploader.parsers.parsers import supported_parsers
from iridauploader.parsers import exceptions
from iridauploader.parsers import common
|
{"/iridauploader/tests/parsers/directory/test_sample_parser.py": ["/iridauploader/parsers/__init__.py", "/iridauploader/model/__init__.py"], "/iridauploader/parsers/__init__.py": ["/iridauploader/parsers/parsers.py"], "/iridauploader/parsers/parsers.py": ["/iridauploader/parsers/__init__.py"]}
|
23,435,846
|
DarianHole/irida-uploader
|
refs/heads/development
|
/iridauploader/core/__init__.py
|
from iridauploader.core import logger
from iridauploader.core import cli_entry
from iridauploader.core import exit_return
from iridauploader.core.cli_entry import VERSION_NUMBER
|
{"/iridauploader/tests/parsers/directory/test_sample_parser.py": ["/iridauploader/parsers/__init__.py", "/iridauploader/model/__init__.py"], "/iridauploader/parsers/__init__.py": ["/iridauploader/parsers/parsers.py"], "/iridauploader/parsers/parsers.py": ["/iridauploader/parsers/__init__.py"]}
|
23,435,847
|
DarianHole/irida-uploader
|
refs/heads/development
|
/iridauploader/api/__init__.py
|
from iridauploader.api.api_calls import ApiCalls
from iridauploader.api import exceptions
|
{"/iridauploader/tests/parsers/directory/test_sample_parser.py": ["/iridauploader/parsers/__init__.py", "/iridauploader/model/__init__.py"], "/iridauploader/parsers/__init__.py": ["/iridauploader/parsers/parsers.py"], "/iridauploader/parsers/parsers.py": ["/iridauploader/parsers/__init__.py"]}
|
23,435,848
|
DarianHole/irida-uploader
|
refs/heads/development
|
/iridauploader/parsers/parsers.py
|
import logging
from iridauploader.parsers import directory, miseq, miniseq, nextseq
supported_parsers = [
'miseq',
'miseq_v26',
'miseq_v31',
'miniseq',
'nextseq',
'iseq',
'directory',
]
class Parser:
"""
This class handles creation of the different parser objects.
    When creating a new parser, the parser type can be added here to enable its usage.
Like the miseq and directory parser, a new parser class needs the following static methods
find_single_run(directory)
find_runs(directory)
get_required_file_list()
get_sample_sheet(directory)
get_sequencing_run(sample_sheet)
"""
@staticmethod
def factory(parser_type):
"""
This factory creates and returns parser objects
example:
from parser import Parser
my_parser = Parser.factory("directory")
:param parser_type: a String of a valid parser name
:return:
"""
if parser_type == "directory":
logging.debug("Creating directory parser")
return directory.Parser()
if parser_type in ['miseq', 'miseq_v26']:
logging.debug("Creating miseq (v26) parser")
return miseq.Parser()
if parser_type in ['miniseq', 'iseq', 'miseq_v31']:
logging.debug("Creating miniseq/iseq/miseq_v31 parser")
return miniseq.Parser()
if parser_type == "nextseq":
logging.debug("Creating nextseq parser")
return nextseq.Parser()
raise AssertionError("Bad parser creation, invalid parser_type given: {}".format(parser_type))
|
{"/iridauploader/tests/parsers/directory/test_sample_parser.py": ["/iridauploader/parsers/__init__.py", "/iridauploader/model/__init__.py"], "/iridauploader/parsers/__init__.py": ["/iridauploader/parsers/parsers.py"], "/iridauploader/parsers/parsers.py": ["/iridauploader/parsers/__init__.py"]}
|
23,435,849
|
DarianHole/irida-uploader
|
refs/heads/development
|
/setup.py
|
import setuptools
# Use the readme file as the long description on PyPi
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='iridauploader',
version='0.4.2',
description='IRIDA uploader: upload NGS data to IRIDA system',
    url='https://github.com/phac-nml/irida-uploader',
author='Jeffrey Thiessen',
author_email='jeffrey.thiessen@canada.ca',
long_description=long_description,
long_description_content_type="text/markdown",
# license specified on github
license='Apache-2.0',
keywords="IRIDA NGS uploader",
packages=setuptools.find_packages(include=['iridauploader',
'iridauploader.*',
]),
install_requires=['rauth',
'requests',
'appdirs',
'cerberus',
'argparse',
'requests-toolbelt',
],
# https://pypi.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
include_package_data=True,
    # Test cases make it incompatible with Python versions before 3.5
python_requires='>=3.5',
)
|
{"/iridauploader/tests/parsers/directory/test_sample_parser.py": ["/iridauploader/parsers/__init__.py", "/iridauploader/model/__init__.py"], "/iridauploader/parsers/__init__.py": ["/iridauploader/parsers/parsers.py"], "/iridauploader/parsers/parsers.py": ["/iridauploader/parsers/__init__.py"]}
|
23,435,850
|
DarianHole/irida-uploader
|
refs/heads/development
|
/iridauploader/gui/__init__.py
|
from iridauploader.gui.gui import MainDialog
|
{"/iridauploader/tests/parsers/directory/test_sample_parser.py": ["/iridauploader/parsers/__init__.py", "/iridauploader/model/__init__.py"], "/iridauploader/parsers/__init__.py": ["/iridauploader/parsers/parsers.py"], "/iridauploader/parsers/parsers.py": ["/iridauploader/parsers/__init__.py"]}
|
23,435,851
|
DarianHole/irida-uploader
|
refs/heads/development
|
/iridauploader/progress/__init__.py
|
from iridauploader.progress.upload_status import get_directory_status, write_directory_status
from iridauploader.progress.upload_signals import signal_worker, send_progress, ProgressData
from iridauploader.progress import exceptions
|
{"/iridauploader/tests/parsers/directory/test_sample_parser.py": ["/iridauploader/parsers/__init__.py", "/iridauploader/model/__init__.py"], "/iridauploader/parsers/__init__.py": ["/iridauploader/parsers/parsers.py"], "/iridauploader/parsers/parsers.py": ["/iridauploader/parsers/__init__.py"]}
|
23,556,695
|
aiwithshekhar/gladiators
|
refs/heads/master
|
/demo.py
|
import os
import sys
import mmcv
import numpy as np
os.environ["CUDA_VISIBLE_DEVICES"] = "7"
base_path = os.path.abspath(os.path.dirname(__file__))
sys.path.append(base_path) # Manually add base path to python module search path
from detector import Detector
from detector_config import get_config
from utils import imshow_det_bboxes
def main():
cfg = get_config()
detector = Detector(cfg)
test_image = os.path.join(base_path, 'test', 'test.jpg')
img_data = mmcv.imread(test_image)
img_data = mmcv.imresize(img_data, (1280, 720))
score_threshold = 0.45
result = detector.get_detections(img_data, score_threshold)
print('Detections: {}'.format(result))
result = [np.concatenate([result[0]['boxes'][j],
result[0]['scores'][j]],
axis=-1) for j in range(len(result[0]['boxes']))]
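    # Each per-class entry is now an (N, 5) array [x1, y1, x2, y2, score], the layout imshow_det_bboxes expects.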
imshow_det_bboxes(img_data,
result,
labels=list(range(len(result))),
class_names=("furniture", "door", "cabels", "socks"),
thickness=2,
font_scale=1,
out_file='./test_output.png')
if __name__ == '__main__':
main()
|
{"/demo.py": ["/detector.py", "/detector_config.py", "/utils.py"], "/generate_detection_video.py": ["/detector.py", "/detector_config.py", "/utils.py"]}
|
23,556,696
|
aiwithshekhar/gladiators
|
refs/heads/master
|
/mmdetection/mmdet/datasets/vaccum_dataset.py
|
import cv2
import numpy as np
from tqdm import tqdm
import json
from .builder import DATASETS
from .custom import CustomDataset
@DATASETS.register_module()
class VaccumDataset(CustomDataset):
CLASSES = ("furniture", "door", "cabels", "socks")
def __init__(self, *args, **kwargs):
np.random.seed(kwargs.pop('seed'))
path = 'dataset/Scene_1_selected.json'
with open(path) as json_file:
self._dl = json.load(json_file)
kwargs.pop('dataset_name')
kwargs.pop('data_split')
self._split = kwargs.pop('split')
self._split_ratio = kwargs.pop('split_ratio')
super(VaccumDataset, self).__init__(ann_file=None, *args, **kwargs)
def load_annotations(self, *args):
print('Computing image sizes')
frames = list(self._dl.keys())
num_train_samples = int(self._split_ratio * len(frames))
idx = np.arange(0, len(frames))
np.random.shuffle(idx)
idx = idx[:num_train_samples] if self._split == 'train' else idx[num_train_samples:]
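        # Seeded shuffle of all frame indices: the first split_ratio fraction goes to train, the remainder to validation.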
samples = []
mapping = {"furniture":0, "door":1, "cabels":2, "socks":3}
for i in tqdm(idx):
frame_info = self._dl[frames[i]]
boxes = []
labels = []
for box in frame_info:
boxes += [[box[3], box[4], box[5], box[6]]]
labels.append(mapping[box[2]])
boxes = np.array(boxes, dtype=np.float32)
labels = np.array(labels, dtype=np.int64)
height, width = box[1], box[0]
samples += [{
'filename': frames[i],
'width': width,
'height': height,
'ann': {
'bboxes': boxes,
'labels': labels,
}
}]
return samples
# pipeline=[
# dict(type='LoadImageFromFile', to_float32=True),
# dict(type='LoadAnnotations', with_bbox=True),
# dict(type='MinIoURandomCrop'),
# dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
# dict(type='PhotoMetricDistortion'),
# dict(type='RandomFlip', flip_ratio=0.5),
# dict(
# type='Normalize',
# mean=[123.675, 116.28, 103.53],
# std=[58.395, 57.12, 57.375],
# to_rgb=True),
# dict(type='Pad', size_divisor=32),
# dict(type='DefaultFormatBundle'),
# dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
# ]
# d1 = VaccumDataset(pipeline = pipeline, split='train', split_ratio=0.5)
# print ('hi')
|
{"/demo.py": ["/detector.py", "/detector_config.py", "/utils.py"], "/generate_detection_video.py": ["/detector.py", "/detector_config.py", "/utils.py"]}
|
23,556,697
|
aiwithshekhar/gladiators
|
refs/heads/master
|
/xml_json.py
|
import cv2
import os
import pandas as pd
import xml.etree.ElementTree as ET
from collections import defaultdict
import glob
import json
import argparse
def xml2csv(xml_path, result, img_dir):
"""Convert XML to CSV
Args:
xml_path (str): Location of annotated XML file
Returns:
pd.DataFrame: converted json file
"""
print("xml to csv {}".format(xml_path))
xml_list = []
xml_df=pd.DataFrame()
try:
tree = ET.parse(xml_path)
root = tree.getroot()
for member in root.findall('object'):
value = [int(root.find('size')[0].text),
int(root.find('size')[1].text),
member[0].text,
int(member[4][0].text),
int(member[4][1].text),
int(member[4][2].text),
int(member[4][3].text)
]
print (value)
if result.get(os.path.join(img_dir,root.find('filename').text)) == None:
result[os.path.join(img_dir,root.find('filename').text)]=[value]
else:
result.get(os.path.join(img_dir,root.find('filename').text)).append(value)
except Exception as e:
print('xml conversion failed:{}'.format(e))
return pd.DataFrame(columns=['filename,width,height','class','xmin','ymin','xmax','ymax'])
return xml_df
def _parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, required=False,
default='/home/heh3kor/vaccum_thon/dataset/Scene_1_selected',
help="Path to the sequence.")
parser.add_argument('--out_json_path', type=str, required=False,
default='/home/heh3kor/vaccum_thon/dataset/Scene_2_selected.json',
help="Output directory.")
args = parser.parse_args()
return args
def main():
args = _parse_args()
img_dir = '/'.join(args.data_dir.split('/')[4:])
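    # Keep only the path components from index 4 on (e.g. 'dataset/Scene_1_selected' for the default path) as the image-directory prefix stored in the JSON keys.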
xml_files = glob.glob(f"{args.data_dir}/*.xml")
result = defaultdict(list)
for single_xml in xml_files:
save_info = xml2csv(single_xml, result, img_dir)
with open(args.out_json_path, 'w') as outfile:
json.dump(result, outfile)
if __name__ == '__main__':
main()
|
{"/demo.py": ["/detector.py", "/detector_config.py", "/utils.py"], "/generate_detection_video.py": ["/detector.py", "/detector_config.py", "/utils.py"]}
|
23,556,698
|
aiwithshekhar/gladiators
|
refs/heads/master
|
/detector.py
|
import numpy as np
import torch
from mmdetection.mmdet.apis import init_detector, inference_detector
class Detector:
def __init__(self, cfg):
"""
:param cfg: EasyDict with configuration parameters
:param weights: Path to the trained pytorch weights file.
"""
self.model = init_detector(
cfg.model_cfg,
cfg.trained_weights,
device='cuda:1' if torch.cuda.is_available() else 'cpu')
def _get_detections(self, image, score_threshold):
"""
:param image: An image in the form of numpy array.
:param score_threshold: A float value representing the minimum confidence of detected boxes.
"""
all_boxes = []
all_scores = []
results = inference_detector(self.model, image)
[all_boxes.append(results[i][:, :-1]) for i in range(len(results))]
[all_scores.append(results[i][:, -1]) for i in range(len(results))]
mask = [i>= score_threshold for i in all_scores]
all_boxes = np.array([all_boxes[i][mask[i]] for i in range(len(all_boxes))])
all_scores = np.array([all_scores[i][mask[i]] for i in range(len(all_boxes))])
# Handles the scenario when there are no detections
if len(all_boxes.shape) == 1:
for i in range(all_boxes.shape[0]):
if all_boxes[i].shape[0] == 0:
all_boxes[i] = ( -1 * np.ones([1, 4]))
all_scores[i] = ( np.zeros([1, 1]))
else:
all_scores[i] = np.expand_dims(all_scores[i], axis=1)
detections = {'boxes': all_boxes, 'scores': all_scores}
else:
for i in range(all_boxes.shape[0]):
all_boxes = np.array([-1 * np.ones([1, 4]) for i in range(4)])
all_scores = np.array([np.zeros([1,1]) for i in range(4)])
detections = {'boxes': all_boxes, 'scores': all_scores}
return detections
def get_detections(self, image_list, score_threshold):
"""
:param image_list: A list of images in the form of numpy arrays.
:param score_threshold: A float value representing the minimum confidence of detected boxes.
"""
if not isinstance(image_list, list):
image_list = [image_list]
# get detections for each image
detections = [
self._get_detections(image, score_threshold) for image in image_list
]
return detections
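# Usage sketch (mirrors demo.py; get_config comes from detector_config):
#   detector = Detector(get_config())
#   detections = detector.get_detections(image, score_threshold=0.45)
#   boxes, scores = detections[0]['boxes'], detections[0]['scores']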
|
{"/demo.py": ["/detector.py", "/detector_config.py", "/utils.py"], "/generate_detection_video.py": ["/detector.py", "/detector_config.py", "/utils.py"]}
|
23,556,699
|
aiwithshekhar/gladiators
|
refs/heads/master
|
/detector_config.py
|
import os
import toml
from easydict import EasyDict
this_path = os.path.abspath(os.path.dirname(__file__))
def get_config():
config_path = os.path.join(this_path, 'detector_config.toml')
cfg = toml.load(config_path)
cfg = EasyDict(cfg) # Convert to EasyDict for simplicity
cfg = _check_and_correct_config(cfg)
return cfg
def _check_and_correct_config(cfg):
assert cfg.mode == 'deploy'
cfg.model_cfg = os.path.join(this_path, cfg.model_cfg) # Convert to absolute path
cfg.trained_weights = os.path.join(this_path, cfg.trained_weights) # Convert to absolute path
assert os.path.exists(cfg.model_cfg) # Model must exist
assert os.path.exists(cfg.trained_weights) # Model must exist
return cfg
get_config()
|
{"/demo.py": ["/detector.py", "/detector_config.py", "/utils.py"], "/generate_detection_video.py": ["/detector.py", "/detector_config.py", "/utils.py"]}
|
23,556,700
|
aiwithshekhar/gladiators
|
refs/heads/master
|
/utils.py
|
import cv2
import numpy as np
import mmcv
def imshow_det_bboxes(img,
bboxes,
labels,
class_names=None,
bbox_color='blue',
text_color='green',
thickness=1,
font_scale=0.35,
out_file=None):
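    # bboxes: one array per class with rows [x1, y1, x2, y2(, score)]; labels: the class index for each array.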
# assert bboxes.ndim == 2
# assert labels.ndim == 1
# assert bboxes.shape[0] == labels.shape[0]
# assert bboxes.shape[1] == 4 or bboxes.shape[1] == 5
img = mmcv.imread(img)
bbox_color = mmcv.color_val(bbox_color)
text_color = mmcv.color_val(text_color)
img = np.ascontiguousarray(img)
for bbox, label in zip(bboxes, labels):
# convert_box = bbox.astype(np.int32)
for bbox_int in bbox:
if all(bbox_int[:4] == np.array([-1.0, -1.0, -1.0, -1.0])): # if unable to predict skip that frame.
continue
left_top = (int(bbox_int[0]), int(bbox_int[1]))
right_bottom = (int(bbox_int[2]), int(bbox_int[3]))
cv2.rectangle(
img, left_top, right_bottom, bbox_color, thickness=thickness)
label_text = class_names[
label] if class_names is not None else f'cls {label}'
if bbox_int.shape[0]>4:
label_text += f'|{bbox_int[-1]:.02f}'
cv2.putText(img, label_text, (int(bbox_int[0]), int(bbox_int[1]) - 2),
cv2.FONT_HERSHEY_COMPLEX, font_scale, text_color)
if out_file is not None:
return mmcv.imwrite(img, out_file)
return img
|
{"/demo.py": ["/detector.py", "/detector_config.py", "/utils.py"], "/generate_detection_video.py": ["/detector.py", "/detector_config.py", "/utils.py"]}
|
23,556,701
|
aiwithshekhar/gladiators
|
refs/heads/master
|
/generate_detection_video.py
|
import argparse
from glob import glob
import os
import time
import cv2
import mmcv
import numpy as np
from detector import Detector
from detector_config import get_config
from utils import imshow_det_bboxes
def make_video(detector, inp_vid_path, outdir, score_threshold=0.45, fps=15, w=1280, h=720):
seq_name = inp_vid_path.split('/')[-1][:-4]+"_detector"
if not os.path.exists(outdir):
os.mkdir(outdir)
output_video = os.path.join(outdir, seq_name + '.avi')
cap = cv2.VideoCapture(inp_vid_path)
fps = cap.get(cv2.CAP_PROP_FPS)
num_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
out = cv2.VideoWriter(output_video, cv2.VideoWriter_fourcc(*"MJPG"), fps,
(w, h))
count = 0
num_warmup = 5
pure_inf_time = 0
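    # The first num_warmup frames are excluded from the FPS measurement below.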
while True:
prev_time = time.time()
ret, frame_read = cap.read()
if ret:
            if frame_read.shape[:2] != (h, w):  # frame shape is (height, width, channels)
frame_read = mmcv.imresize(frame_read, (w, h))
result = detector.get_detections(frame_read, score_threshold)
result = [np.concatenate([result[0]['boxes'][j],
result[0]['scores'][j]],
axis=-1) for j in range(len(result[0]['boxes']))]
image = imshow_det_bboxes(frame_read,
result,
labels=list(range(len(result))),
class_names=("furniture", "door", "cabels", "socks"),
thickness=2,
font_scale=0.8)
out.write(image)
count += 1
if count >= num_warmup:
pure_inf_time += time.time() - prev_time
model_fps = (count + 1 - num_warmup) / pure_inf_time
print(f'\rProcessing frame: {count + 1}/{num_frames} | Current FPS: {model_fps}',end='')
else:
break
out.release()
cap.release()
# cv2.destroyAllWindows()
def _parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--inp_vid_path', type=str, required=False,
default='/home/heh3kor/vaccum_thon/test/test_video.mp4',
help="Path to the video.")
parser.add_argument('--out_dir', type=str, required=False, default='output_videos',
help="Output directory.")
parser.add_argument('--score_threshold', type=float, required=False, default=0.45,
help="Score threshold for filtering detections.")
parser.add_argument('--fps', type=int, required=False, default=15,
help="Output video FPS.")
parser.add_argument('--width', type=int, required=False, default=1280,
help="Output video width.")
parser.add_argument('--height', type=int, required=False, default=720,
help="Output video height.")
args = parser.parse_args()
return args
def main():
args = _parse_args()
cfg = get_config()
detector = Detector(cfg)
# frames = glob(os.path.join(args.sequence_path, '*'))
make_video(
detector,
args.inp_vid_path,
args.out_dir,
args.score_threshold,
args.fps,
args.width,
args.height)
if __name__ == '__main__':
main()
|
{"/demo.py": ["/detector.py", "/detector_config.py", "/utils.py"], "/generate_detection_video.py": ["/detector.py", "/detector_config.py", "/utils.py"]}
|
23,583,625
|
Javierfelixuts/fastapi_granjas
|
refs/heads/master
|
/fast_api_app/schemas.py
|
from datetime import datetime
from typing import List, Optional
from pydantic import BaseModel
#Farm Visited
class FarmsVisitedBase(BaseModel):
frm_visited_date : datetime
FARM_frm_visited_id: int
USER_frm_visited_id : int
class FarmVisitedCreate(FarmsVisitedBase):
pass
class FarmVisited(FarmsVisitedBase):
frm_visited_id: int
class Config:
orm_mode = True
#Farm
class FarmBase(BaseModel):
frm_name: str
frm_created: datetime
class FarmCreate(FarmBase):
pass
class Farm(FarmBase):
frm_id: int
FARM_TYPES_frm_id : int
REGION_frm_id: int
class Config:
orm_mode = True
#FarmType
class FarmTypeBase(BaseModel):
frm_type_name : str
frm_type_created: datetime
class FarmTypeCreate(FarmTypeBase):
pass
class FarmType(FarmTypeBase):
frm_type_id: int
frm_type_enabled: bool
class Config:
orm_mode = True
#Region
class RegionBase(BaseModel):
reg_name: str
reg_created: datetime
class RegionCreate(RegionBase):
pass
class Region(RegionBase):
reg_id: int
class Config:
orm_mode = True
#Item
class ItemBase(BaseModel):
title: str
description: Optional[str] = None
class ItemCreate(ItemBase):
pass
class Item(ItemBase):
id: int
owner_id: int
class Config:
orm_mode = True
#User
class UserBase(BaseModel):
email: str
usr_username : str
class UserCreate(UserBase):
password: str
class User(UserBase):
id: int
is_active: bool
items: List[Item] = []
usr_username: str
class Config:
orm_mode = True
|
{"/fast_api_app/models.py": ["/fast_api_app/database.py"], "/fast_api_app/main.py": ["/fast_api_app/database.py"]}
|
23,583,626
|
Javierfelixuts/fastapi_granjas
|
refs/heads/master
|
/fast_api_app/models.py
|
from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, CHAR, JSON, TIMESTAMP, text
from sqlalchemy.orm import relationship
from sqlalchemy.dialects.postgresql import insert
from .database import Base
class User(Base):
__tablename__ = "users"
id = Column(Integer, primary_key=True, nullable=False)
email = Column(String(32), unique=True, index=True)
usr_username = Column(String(45))
hashed_password = Column(String(32))
usr_name = Column(String(45))
usr_lastname = Column(String(45))
usr_created = Column(TIMESTAMP, server_default=text("CURRENT_TIMESTAMP"))
usr_updated = Column(TIMESTAMP)
is_active = Column(Boolean, default=True)
items = relationship("Item", back_populates="owner")
class Item(Base):
__tablename__ = "items"
id = Column(Integer, primary_key=True, index=True)
title = Column(String(32), index=True)
description = Column(String(32), index=True)
owner_id = Column(Integer, ForeignKey("users.id"))
owner = relationship("User", back_populates="items")
class FarmType(Base):
__tablename__ = 'farm_types'
frm_type_id = Column(Integer, primary_key=True)
frm_type_name = Column(String(45), nullable=False)
frm_type_created = Column(TIMESTAMP, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
frm_type_enabled = Column(Integer, nullable=False, default=True)
class Region(Base):
__tablename__ = 'regions'
reg_id = Column(Integer, primary_key=True)
reg_name = Column(String(45), nullable=False)
reg_created = Column(TIMESTAMP, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
reg_updated = Column(TIMESTAMP, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
reg_enabled = Column(Integer, nullable=False,server_default=text("1"))
class Farm(Base):
__tablename__ = 'farms'
frm_id = Column(Integer, primary_key=True, unique=True, nullable=False, autoincrement=True)
frm_name = Column(String(45), nullable=False)
frm_restriction = Column(JSON)
frm_created = Column(TIMESTAMP, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
frm_updated = Column(TIMESTAMP)
frm_enabled = Column(Integer, nullable=False, default=1)
FARM_TYPES_frm_id = Column(ForeignKey('farm_types.frm_type_id'), primary_key=True, nullable=False, index=True)
REGION_frm_id = Column(ForeignKey('regions.reg_id'), primary_key=True, nullable=False, index=True)
FARM_TYPES_frm = relationship('FarmType')
REGION_frm = relationship('Region')
class FarmsVisited(Base):
__tablename__ = 'farms_visited'
frm_visited_id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
frm_visited_date = Column(TIMESTAMP, server_default=text("CURRENT_TIMESTAMP"))
FARM_frm_visited_id = Column(ForeignKey('farms.frm_id'), primary_key=True, nullable=False, index=True)
USER_frm_visited_id = Column(ForeignKey('users.id'), primary_key=True, nullable=False, index=True)
FARM_frm_visited = relationship('Farm')
USER_frm_visited = relationship('User')
# coding: utf-8
""" from sqlalchemy import CHAR, Column, ForeignKey, JSON, String, TIMESTAMP, text
from sqlalchemy.dialects.mysql import BIT, Integer, TINYINT
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
metadata = Base.metadata
class FarmType(Base):
__tablename__ = 'farm_types'
frm_type_id = Column(TINYINT, primary_key=True)
frm_type_name = Column(String(45), nullable=False)
frm_type_created = Column(TIMESTAMP, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
frm_type_enabled = Column(BIT(2), nullable=False)
class Region(Base):
__tablename__ = 'regions'
reg_id = Column(TINYINT, primary_key=True)
reg_name = Column(String(45), nullable=False)
reg_created = Column(TIMESTAMP, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
reg_updated = Column(TIMESTAMP)
reg_enabled = Column(BIT(2))
class Farm(Base):
__tablename__ = 'farms'
frm_id = Column(Integer, primary_key=True, nullable=False)
frm_name = Column(String(45), nullable=False)
frm_restriction = Column(JSON)
frm_created = Column(TIMESTAMP, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
frm_updated = Column(TIMESTAMP)
frm_enabled = Column(BIT(2), nullable=False)
REGION_frm_id = Column(ForeignKey('regions.reg_id'), primary_key=True, nullable=False, index=True)
FARM_TYPES_frm_id = Column(ForeignKey('farm_types.frm_type_id'), primary_key=True, nullable=False, index=True)
FARM_TYPES_frm = relationship('FarmType')
REGION_frm = relationship('Region')
class User(Base):
__tablename__ = 'users'
usr_id = Column(INTEGER, primary_key=True, nullable=False)
usr_username = Column(String(45), nullable=False)
usr_password = Column(CHAR(32))
user_name = Column(String(45))
usr_lastname = Column(String(45))
usr_created = Column(TIMESTAMP)
usr_updated = Column(TIMESTAMP)
REGION_usr_id = Column(ForeignKey('regions.reg_id'), primary_key=True, nullable=False, index=True)
REGION_usr = relationship('Region')
class FarmsVisited(Base):
__tablename__ = 'farms_visited'
frm_visited_id = Column(INTEGER, primary_key=True, nullable=False)
frm_visited_date = Column(TIMESTAMP, server_default=text("CURRENT_TIMESTAMP"))
FARM_frm_visited_id = Column(ForeignKey('farms.frm_id'), primary_key=True, nullable=False, index=True)
USER_frm_visited_id = Column(ForeignKey('users.usr_id'), primary_key=True, nullable=False, index=True)
FARM_frm_visited = relationship('Farm')
USER_frm_visited = relationship('User') """
|
{"/fast_api_app/models.py": ["/fast_api_app/database.py"], "/fast_api_app/main.py": ["/fast_api_app/database.py"]}
|
23,583,627
|
Javierfelixuts/fastapi_granjas
|
refs/heads/master
|
/fast_api_app/main.py
|
from typing import List
from fastapi import Depends, FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm
from sqlalchemy.orm import Session
from . import crud, models, schemas
from .database import SessionLocal, engine
models.Base.metadata.create_all(bind=engine)
app = FastAPI(debug=True)
origins = [
"http://localhost:3000",
"localhost:3000",
"http://10.0.0.24:3000",
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# Dependency
def get_db():
db = SessionLocal()
try:
yield db
finally:
db.close()
# Add a unique user
@app.post("/users/", response_model=schemas.User)
def create_user(user: schemas.UserCreate, db: Session = Depends(get_db)):
db_user = crud.get_user_by_email(db, email=user.email)
if db_user:
raise HTTPException(status_code=400, detail="Email already registered")
return crud.create_user(db=db, user=user)
@app.get("/users/", response_model=List[schemas.User])
def read_users(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
users = crud.get_users(db, skip=skip, limit=limit)
return users
@app.get("/users/{user_id}", response_model=schemas.User)
def read_user(user_id: int, db: Session = Depends(get_db)):
db_user = crud.get_user(db, user_id=user_id)
if db_user is None:
raise HTTPException(status_code=404, detail="User not found")
return db_user
@app.post("/users/{user_id}/items/", response_model=schemas.Item)
def create_item_for_user(
user_id: int, item: schemas.ItemCreate, db: Session = Depends(get_db)
):
return crud.create_user_item(db=db, item=item, user_id=user_id)
#REGION
# Get a region by its id
@app.get('/region/{reg_id}', response_model=schemas.Region)
def read_region(reg_id: int, db: Session = Depends(get_db)):
db_region = crud.get_region(db, reg_id=reg_id)
return db_region
# Get a list of regions in the 0 - 100 range
@app.get("/regions/", response_model=List[schemas.Region])
def read_regions(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
regions = crud.get_regions(db, skip=skip, limit=limit)
return regions
# Add a region
@app.post("/regions/", response_model=schemas.Region)
def create_region(region: schemas.RegionCreate, db: Session = Depends(get_db)):
db_region = crud.create_region(db=db, region=region)
return db_region
# List items
@app.get("/items/", response_model=List[schemas.Item])
def read_items(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
items = crud.get_items(db, skip=skip, limit=limit)
return items
# Add a farm given the region id
@app.post("/regions/{reg_id}/farms/{frm_type_id}", response_model=schemas.Farm)
def create_farm_for_region(
reg_id: int, frm_type_id: int, farm: schemas.FarmCreate, db: Session = Depends(get_db)
):
return crud.create_region_farm(db=db, farm=farm, reg_id=reg_id, frm_type_id=frm_type_id)
# Add a farm type
@app.post("/farm_type/", response_model=schemas.FarmType)
def create_farm_type(farm_type: schemas.FarmTypeCreate, db: Session = Depends(get_db)):
db_farm_type = crud.create_farm_type(db=db, farm_type=farm_type)
return db_farm_type
@app.post("/farm_visited/", response_model=schemas.FarmVisited)
def create_far_visited(farm_visited: schemas.FarmVisitedCreate, db: Session = Depends(get_db)):
db_farm_visited = crud.create_farm_visited(db=db, farm_visited=farm_visited)
return db_farm_visited
@app.get("/farm_visited/", response_model=List[schemas.FarmVisited])
def read_farm_visited(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
farm_visited = crud.get_farm_visited(db, skip=skip, limit=limit)
return farm_visited
@app.get("/farm_type/{farm_type_id}", response_model=schemas.FarmType)
def read_farm_type(farm_type_id: int, db: Session = Depends(get_db)):
db_farm_type = crud.get_farm_type(db, farm_type_id=farm_type_id)
return db_farm_type
|
{"/fast_api_app/models.py": ["/fast_api_app/database.py"], "/fast_api_app/main.py": ["/fast_api_app/database.py"]}
|
23,583,628
|
Javierfelixuts/fastapi_granjas
|
refs/heads/master
|
/fast_api_app/database.py
|
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
#SQLALCHEMY_DATABASE_URL = "mysql+mysqlconnector://ojaialim@localhost:@162.241.62.125:2083/ojaialim_visita_granjas"
#SQLALCHEMY_DATABASE_URL = "sqlite:///./sql_app.db"
SQLALCHEMY_DATABASE_URL = "postgresql://eigvzozhsppyzp:48666d080fd22f82f7a643f44a9edfb187f4022eac4c0e737cb10891b629486d@ec2-3-214-136-47.compute-1.amazonaws.com:5432/d1ftdum8f76d8f"
engine = create_engine(
SQLALCHEMY_DATABASE_URL
)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()
|
{"/fast_api_app/models.py": ["/fast_api_app/database.py"], "/fast_api_app/main.py": ["/fast_api_app/database.py"]}
|
23,583,629
|
Javierfelixuts/fastapi_granjas
|
refs/heads/master
|
/fast_api_app/crud.py
|
from sqlalchemy.orm import Session
from . import models, schemas
def get_user(db: Session, user_id: int):
return db.query(models.User).filter(models.User.id == user_id).first()
def get_user_by_email(db: Session, email: str):
return db.query(models.User).filter(models.User.email == email).first()
def get_users(db: Session, skip: int = 0, limit: int = 100):
return db.query(models.User).offset(skip).limit(limit).all()
def create_user(db: Session, user: schemas.UserCreate):
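    # NOTE: placeholder "hashing" (as in the FastAPI tutorial example); a real password hash should be used in production.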
fake_hashed_password = user.password + "notreallyhashed"
db_user = models.User(email=user.email, hashed_password=fake_hashed_password, usr_username=user.usr_username)
db.add(db_user)
db.commit()
db.refresh(db_user)
return db_user
#Regions
def get_regions(db: Session, skip: int = 0, limit: int = 100):
return db.query(models.Region).offset(skip).limit(limit).all()
def get_region(db: Session, reg_id: int):
return db.query(models.Region).filter(models.Region.reg_id == reg_id).first()
def create_region(db: Session, region: schemas.RegionCreate):
db_region = models.Region(reg_name=region.reg_name, reg_created=region.reg_created )
db.add(db_region)
db.commit()
db.refresh(db_region)
return db_region
#Farms Visited
def create_farm_visited(db: Session, farm_visited: schemas.FarmVisitedCreate):
db_farm_visited = models.FarmsVisited(frm_visited_date=farm_visited.frm_visited_date, FARM_frm_visited_id=farm_visited.FARM_frm_visited_id, USER_frm_visited_id=farm_visited.USER_frm_visited_id)
db.add(db_farm_visited)
db.commit()
db.refresh(db_farm_visited)
return db_farm_visited
def get_farm_visited(db: Session, skip: int = 0, limit: int = 100):
return db.query(models.FarmsVisited).offset(skip).limit(limit).all()
# Farms, Farm Types, Regions
def create_region_farm(db: Session, farm: schemas.FarmCreate, reg_id: int, frm_type_id: int):
db_farm = models.Farm(**farm.dict(), REGION_frm_id=reg_id, FARM_TYPES_frm_id=frm_type_id)
db.add(db_farm)
db.commit()
db.refresh(db_farm)
return db_farm
def create_farm_type(db: Session, farm_type: schemas.FarmTypeCreate):
db_farm_type = models.FarmType(frm_type_name=farm_type.frm_type_name, frm_type_created=farm_type.frm_type_created)
db.add(db_farm_type)
db.commit()
db.refresh(db_farm_type)
return db_farm_type
def get_farm_type(db: Session, farm_type_id: int):
return db.query(models.FarmType).filter(models.FarmType.frm_type_id == farm_type_id).first()
def get_items(db: Session, skip: int = 0, limit: int = 100):
return db.query(models.Item).offset(skip).limit(limit).all()
def create_user_item(db: Session, item: schemas.ItemCreate, user_id: int):
db_item = models.Item(**item.dict(), owner_id=user_id)
db.add(db_item)
db.commit()
db.refresh(db_item)
return db_item
|
{"/fast_api_app/models.py": ["/fast_api_app/database.py"], "/fast_api_app/main.py": ["/fast_api_app/database.py"]}
|
23,635,042
|
eugenekk/django
|
refs/heads/main
|
/myproject/article/admin.py
|
from django.contrib import admin
from .models import Article
# Register your models here.
def make_published(self, request, queryset):
queryset.update(status = 'p')
make_published.short_description = 'Change the selected articles to Published status.'
def make_draft(self, request, queryset):
queryset.update(status = 'd')
make_draft.short_description = 'Change the selected articles to Draft status.'
def make_withdrawn(self, request, queryset):
queryset.update(status = 'w')
make_withdrawn.short_description = 'Change the selected articles to Withdrawn status.'
class ArticleAdmin(admin.ModelAdmin):
list_display = ['title', 'status']
ordering = ['title']
actions = [make_published, make_draft, make_withdrawn]
admin.site.register(Article, ArticleAdmin)
|
{"/myproject/article/admin.py": ["/myproject/article/models.py"], "/myproject/article/views.py": ["/myproject/article/models.py"], "/myproject/blog/views.py": ["/myproject/blog/models.py"], "/myproject/book/views.py": ["/myproject/book/models.py", "/myproject/book/forms.py"], "/myproject/blog/admin.py": ["/myproject/blog/models.py"], "/myproject/book/forms.py": ["/myproject/book/models.py"]}
|
23,635,043
|
eugenekk/django
|
refs/heads/main
|
/myproject/article/views.py
|
# from myproject import article
from django.shortcuts import render
from .models import Article
# Create your views here.
def article_list(request):
qs = Article.objects.all()
    q = request.GET.get('q')  # search keyword sent from the client's search box
if q:
qs = qs.filter(title__icontains=q)
return render(request, 'article/article_list.html', {'article_list' : qs, 'q':q})
def detail(request, id):
article = Article.objects.get(id=id)
return render(request, "article/article_detail.html", {'article': article})
|
{"/myproject/article/admin.py": ["/myproject/article/models.py"], "/myproject/article/views.py": ["/myproject/article/models.py"], "/myproject/blog/views.py": ["/myproject/blog/models.py"], "/myproject/book/views.py": ["/myproject/book/models.py", "/myproject/book/forms.py"], "/myproject/blog/admin.py": ["/myproject/blog/models.py"], "/myproject/book/forms.py": ["/myproject/book/models.py"]}
|
23,635,044
|
eugenekk/django
|
refs/heads/main
|
/myproject/book/models.py
|
from django.db import models
from django.urls import reverse
# Create your models here.
class Book(models.Model):
title = models.CharField(max_length=50)
author = models.CharField(max_length=50)
publisher = models.CharField(max_length=50)
publication_date = models.DateTimeField(auto_now_add=True)
def get_absolute_url(self):
return reverse('book:list')
|
{"/myproject/article/admin.py": ["/myproject/article/models.py"], "/myproject/article/views.py": ["/myproject/article/models.py"], "/myproject/blog/views.py": ["/myproject/blog/models.py"], "/myproject/book/views.py": ["/myproject/book/models.py", "/myproject/book/forms.py"], "/myproject/blog/admin.py": ["/myproject/blog/models.py"], "/myproject/book/forms.py": ["/myproject/book/models.py"]}
|
23,635,045
|
eugenekk/django
|
refs/heads/main
|
/myproject/blog/urls.py
|
from re import VERBOSE
from django.urls import path, include, register_converter
from . import views
from .converters import CodeConverter
register_converter(CodeConverter, 'dddd')
urlpatterns = [
path('test3/', views.get_redirect2),
path('file/', views.excel_download),
path('json/', views.json_test),
path('detail/<id>/', views.detail),
path('', views.index2),
path('test/', views.index),
    path('test2/<dddd:id>', views.test2),  # receives an argument named id via the registered converter
path('test4/', views.test4),
path('test5/', views.test5),
]
|
{"/myproject/article/admin.py": ["/myproject/article/models.py"], "/myproject/article/views.py": ["/myproject/article/models.py"], "/myproject/blog/views.py": ["/myproject/blog/models.py"], "/myproject/book/views.py": ["/myproject/book/models.py", "/myproject/book/forms.py"], "/myproject/blog/admin.py": ["/myproject/blog/models.py"], "/myproject/book/forms.py": ["/myproject/book/models.py"]}
|
23,635,046
|
eugenekk/django
|
refs/heads/main
|
/myproject/blog/models.py
|
from django.db import models
from django.conf import settings
# Create your models here.
class Post(models.Model):
    REGION_CHOICE = (
        ('Africa', 'Africa'),
        ('Europe', 'Europe'),
        ('Oceania', 'Oceania'),
        ('Asia', 'Asia'),
        ('North America', 'North America'),
        ('South America', 'South America'),
    )
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
# author = models.CharField(max_length=20)
    title = models.CharField(max_length=100, verbose_name="Title", help_text="Enter a title (max 100 characters).")
    content = models.TextField(verbose_name="Content")
created_at = models.DateField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
tags = models.CharField(max_length=100, blank=True)
    inglat = models.CharField(max_length=50, blank=True, help_text="Enter as longitude, latitude")
region = models.CharField(max_length=20, choices=REGION_CHOICE, default='Asia')
    # M:M relationship: Tag field added
tag_set = models.ManyToManyField('Tag')
def __str__(self):
        return self.title  # __str__ method override (to show the title when printing an instance, e.g. p1)
# 1:N relationship - 1 (Post) : N (Comment)
class Comment(models.Model):
post = models.ForeignKey(Post, on_delete=models.CASCADE)
author = models.CharField(max_length=20)
message = models.TextField()
created_at = models.DateField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
        return self.message  # __str__ method override (to show the message when printing an instance)
# M:M relationship M (Post) : M (Tag) - add the field to one of the two sides (tag_set column added to Post)
class Tag(models.Model):
name = models.CharField(max_length=50, unique=True)
def __str__(self):
return self.name
|
{"/myproject/article/admin.py": ["/myproject/article/models.py"], "/myproject/article/views.py": ["/myproject/article/models.py"], "/myproject/blog/views.py": ["/myproject/blog/models.py"], "/myproject/book/views.py": ["/myproject/book/models.py", "/myproject/book/forms.py"], "/myproject/blog/admin.py": ["/myproject/blog/models.py"], "/myproject/book/forms.py": ["/myproject/book/models.py"]}
|
23,635,047
|
eugenekk/django
|
refs/heads/main
|
/myproject/shop/migrations/0003_auto_20210701_1716.py
|
# Generated by Django 2.1.1 on 2021-07-01 08:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shop', '0002_post_name'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='name',
),
migrations.AddField(
model_name='post',
name='title',
field=models.CharField(default='', max_length=100),
preserve_default=False,
),
]
|
{"/myproject/article/admin.py": ["/myproject/article/models.py"], "/myproject/article/views.py": ["/myproject/article/models.py"], "/myproject/blog/views.py": ["/myproject/blog/models.py"], "/myproject/book/views.py": ["/myproject/book/models.py", "/myproject/book/forms.py"], "/myproject/blog/admin.py": ["/myproject/blog/models.py"], "/myproject/book/forms.py": ["/myproject/book/models.py"]}
|
23,635,048
|
eugenekk/django
|
refs/heads/main
|
/myproject/article/urls.py
|
from django.urls import path
from . import views
app_name = 'article'
urlpatterns = [
path('', views.article_list, name = 'list'),
path('<id>/detail/', views.detail, name ='detail'),
]
|
{"/myproject/article/admin.py": ["/myproject/article/models.py"], "/myproject/article/views.py": ["/myproject/article/models.py"], "/myproject/blog/views.py": ["/myproject/blog/models.py"], "/myproject/book/views.py": ["/myproject/book/models.py", "/myproject/book/forms.py"], "/myproject/blog/admin.py": ["/myproject/blog/models.py"], "/myproject/book/forms.py": ["/myproject/book/models.py"]}
|
23,635,049
|
eugenekk/django
|
refs/heads/main
|
/myproject/blog/migrations/0004_post_region.py
|
# Generated by Django 2.1.1 on 2021-07-01 01:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0003_post_author'),
]
operations = [
migrations.AddField(
model_name='post',
name='region',
            field=models.CharField(choices=[('Africa', 'Africa'), ('Europe', 'Europe'), ('Oceania', 'Oceania'), ('Asia', 'Asia'), ('North America', 'North America'), ('South America', 'South America')], default='Asia', max_length=20),
),
]
|
{"/myproject/article/admin.py": ["/myproject/article/models.py"], "/myproject/article/views.py": ["/myproject/article/models.py"], "/myproject/blog/views.py": ["/myproject/blog/models.py"], "/myproject/book/views.py": ["/myproject/book/models.py", "/myproject/book/forms.py"], "/myproject/blog/admin.py": ["/myproject/blog/models.py"], "/myproject/book/forms.py": ["/myproject/book/models.py"]}
|
23,635,050
|
eugenekk/django
|
refs/heads/main
|
/myproject/blog/views.py
|
from django.http import response
from django.http.response import Http404, JsonResponse
from django.shortcuts import get_object_or_404, render, redirect
from django.http import HttpResponse
from .models import Post
import os
from django.conf import settings
# Create your views here.
# Services implemented by the blog app are written below.
def index(request):
post_list = Post.objects.all()
output = ','.join([p.title for p in post_list])
return HttpResponse(output)
def index2(request):
post_list = Post.objects.all()
return render(request, 'blog/index.html', {'post_list':post_list})
# def detail(request, id):  # url parameter received as an argument from urls.py
# try:
# post = Post.objects.get(id=id)
# except Post.DoesNotExist:
# raise Http404("page not found")
# return render(request, 'blog/detail.html', {'post':post})
def detail(request, id):  # url parameter received as an argument from urls.py
post = get_object_or_404(Post, id=id)
return render(request, 'blog/detail.html', {'post':post})
def json_test(request):
music = {'singer':'BTS', 'songs': ['Fake Love', 'DNA', 'ํผ๋๋๋ฌผ', '๋ด๋ ']}
return JsonResponse(music, json_dumps_params={'ensure_ascii': False})
def excel_download(request):
filepath = os.path.join(settings.BASE_DIR, 'demo.xlsx')
filename = os.path.basename(filepath)
with open(filepath, 'rb') as f:
response = HttpResponse(f, content_type = 'application/vnd.ms-excel')
response["Content-Disposition"] = "attachment; filename={}".format(filename)
return response
def get_redirect1(request):
return redirect('/blog/', permanent=True)
def get_redirect2(request):
return redirect('http://google.com')
def test2(request, id):
print(type(id))
return HttpResponse(id)
# p.120 Variables: template tag test sample
from django.utils import timezone
class Person:
def __init__(self, name):
self.name = name
def say_hello(self):
return 'hello'
def test4(request):
people = ['Amy', 'Josh', 'Tobey', 'John']
person = Person('Amy')
person_list = []
now = timezone.now()
past_dt = timezone.datetime(1971,8,22,0,0)
criteria_dt = timezone.datetime(2001,3,19,0,0)
future_dt = timezone.datetime(2037,1,1,0,0)
msg = '''
Miracles happen to only those who believe in them.
Think like a man of action and act like man of thought.
Courage is very important. Like a muscle, it is strengthened by use.
Life is the art of drawing sufficient conclusions from insufficient premises.
By doubting we come at the truth.
A man that has no virtue in himself, ever envies virtue in others.
When money speaks, the truth keeps silent.
Better the last smile than the first laughter.
'''
value = '<b>Joel</b> <button>is</button> a <span>slug</span>'
value1 = 'Joel is a slug'
value2 = '<p>Joel is a slug</p>'
value3 = "https://www.example.org/foo?a=b&c=d"
value4 = "Check out www.djangoproject.com"
value5 = "Send questions to foo@example.com"
return render(request, 'test.html', {'people':people, 'person':person, 'person_list':person_list,
'datetime_obj':now, 'past_dt':past_dt, 'criteria_dt':criteria_dt,
'future_dt':future_dt, 'value':value, 'value1':value1,
'value2':value2, 'value3':value3, 'value4':value4,
'value5':value5, 'msg':msg})
# Building a form from a Model
# http://localhost:8000/blog/test5/ : handle the two request methods (GET/POST) separately at the same URL
def test5(request):
if request.method =='POST':
pass
else:
return render(request, 'member.html')
|
{"/myproject/article/admin.py": ["/myproject/article/models.py"], "/myproject/article/views.py": ["/myproject/article/models.py"], "/myproject/blog/views.py": ["/myproject/blog/models.py"], "/myproject/book/views.py": ["/myproject/book/models.py", "/myproject/book/forms.py"], "/myproject/blog/admin.py": ["/myproject/blog/models.py"], "/myproject/book/forms.py": ["/myproject/book/models.py"]}
|
23,635,051
|
eugenekk/django
|
refs/heads/main
|
/myproject/article/models.py
|
from django.db import models
from django.urls import reverse
# Create your models here.
STATUS_CHOICES = (
('d','Draft'),
('p','Published'),
('w','Withdrawn'),
)
class Article(models.Model):
title = models.CharField(max_length=100)
body = models.TextField()
status = models.CharField(max_length=1, choices=STATUS_CHOICES)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('article:detail', args=[self.id])
|
{"/myproject/article/admin.py": ["/myproject/article/models.py"], "/myproject/article/views.py": ["/myproject/article/models.py"], "/myproject/blog/views.py": ["/myproject/blog/models.py"], "/myproject/book/views.py": ["/myproject/book/models.py", "/myproject/book/forms.py"], "/myproject/blog/admin.py": ["/myproject/blog/models.py"], "/myproject/book/forms.py": ["/myproject/book/models.py"]}
|
23,635,052
|
eugenekk/django
|
refs/heads/main
|
/myproject/book/urls.py
|
from django.urls import path
from django.urls.resolvers import URLPattern
from . import views
app_name = 'book'
urlpatterns = [
path('', views.book_list, name="list"),
path('new/', views.book_new, name="new"),
]
|
{"/myproject/article/admin.py": ["/myproject/article/models.py"], "/myproject/article/views.py": ["/myproject/article/models.py"], "/myproject/blog/views.py": ["/myproject/blog/models.py"], "/myproject/book/views.py": ["/myproject/book/models.py", "/myproject/book/forms.py"], "/myproject/blog/admin.py": ["/myproject/blog/models.py"], "/myproject/book/forms.py": ["/myproject/book/models.py"]}
|
23,635,053
|
eugenekk/django
|
refs/heads/main
|
/myproject/book/views.py
|
from django.shortcuts import render
from django.views.generic import CreateView
from .models import Book
from .forms import BookForm
from django.http import HttpResponse
# Create your views here.
# book_new = CreateView.as_view(model=Book, fields='__all__')
def book_new(request):
if request.method == 'POST':
        form = BookForm(request.POST)  # data binding
if form.is_valid():
            print(form.cleaned_data)
else:
form = BookForm()
return render(request, 'book/book_form.html', {'form':form})
def book_list(request):
return HttpResponse("hello")
|
{"/myproject/article/admin.py": ["/myproject/article/models.py"], "/myproject/article/views.py": ["/myproject/article/models.py"], "/myproject/blog/views.py": ["/myproject/blog/models.py"], "/myproject/book/views.py": ["/myproject/book/models.py", "/myproject/book/forms.py"], "/myproject/blog/admin.py": ["/myproject/blog/models.py"], "/myproject/book/forms.py": ["/myproject/book/models.py"]}
|
23,635,054
|
eugenekk/django
|
refs/heads/main
|
/myproject/blog/admin.py
|
from django.contrib import admin
from .models import Post, Comment, Tag
from django.utils.safestring import mark_safe
# Register your models here.
# Register the Post table so it can be managed from the admin page
# Method 1) use the provided admin as-is
# admin.site.register(Post) -> replaced by the customized admin below
# admin.site.register(Comment)
admin.site.register(Tag)
# Method 2) customize
class PostAdmin(admin.ModelAdmin):
    list_display = ['id','title','content_size','content_size2','created_at','updated_at']  # column names shown in the admin list
list_display_links = ['id', 'title']
fields = ['title','content','user']
list_filter = ['created_at']
search_fields = ['title']
def content_size(self, post):
        return '{} chars'.format(len(post.content))
def content_size2(self, post):
        return mark_safe('<strong>{} chars</strong>'.format(len(post.content)))
    content_size.short_description = 'Character count'
    content_size2.short_description = 'Character count 2'
admin.site.register(Post, PostAdmin)
# ๋ฐฉ๋ฒ3) ์ปค์คํ
ํ๊ธฐ2
@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
list_display = ['id', 'post','author','message', 'created_at', 'updated_at']
|
{"/myproject/article/admin.py": ["/myproject/article/models.py"], "/myproject/article/views.py": ["/myproject/article/models.py"], "/myproject/blog/views.py": ["/myproject/blog/models.py"], "/myproject/book/views.py": ["/myproject/book/models.py", "/myproject/book/forms.py"], "/myproject/blog/admin.py": ["/myproject/blog/models.py"], "/myproject/book/forms.py": ["/myproject/book/models.py"]}
|
23,635,055
|
eugenekk/django
|
refs/heads/main
|
/myproject/book/forms.py
|
from django import forms
from .models import Book
class BookForm(forms.Form):
    title = forms.CharField(label="Title")
    author = forms.CharField(label="Author")
    publisher = forms.CharField(label="Publisher", required=False)
|
{"/myproject/article/admin.py": ["/myproject/article/models.py"], "/myproject/article/views.py": ["/myproject/article/models.py"], "/myproject/blog/views.py": ["/myproject/blog/models.py"], "/myproject/book/views.py": ["/myproject/book/models.py", "/myproject/book/forms.py"], "/myproject/blog/admin.py": ["/myproject/blog/models.py"], "/myproject/book/forms.py": ["/myproject/book/models.py"]}
|
23,635,056
|
eugenekk/django
|
refs/heads/main
|
/myproject/shop/migrations/0002_post_name.py
|
# Generated by Django 2.1.1 on 2021-07-01 08:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shop', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='post',
name='name',
field=models.CharField(default='', max_length=100),
preserve_default=False,
),
]
|
{"/myproject/article/admin.py": ["/myproject/article/models.py"], "/myproject/article/views.py": ["/myproject/article/models.py"], "/myproject/blog/views.py": ["/myproject/blog/models.py"], "/myproject/book/views.py": ["/myproject/book/models.py", "/myproject/book/forms.py"], "/myproject/blog/admin.py": ["/myproject/blog/models.py"], "/myproject/book/forms.py": ["/myproject/book/models.py"]}
|
23,635,057
|
eugenekk/django
|
refs/heads/main
|
/myproject/shop/models.py
|
from django.db import models
from django.conf import settings
# Create your models here.
class Post(models.Model):
title = models.CharField(max_length=100)
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='shop_post_set')
    content = models.TextField(verbose_name="content")
created_at = models.DateField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
|
{"/myproject/article/admin.py": ["/myproject/article/models.py"], "/myproject/article/views.py": ["/myproject/article/models.py"], "/myproject/blog/views.py": ["/myproject/blog/models.py"], "/myproject/book/views.py": ["/myproject/book/models.py", "/myproject/book/forms.py"], "/myproject/blog/admin.py": ["/myproject/blog/models.py"], "/myproject/book/forms.py": ["/myproject/book/models.py"]}
|
23,696,204
|
xinrongl/huawei-rs
|
refs/heads/master
|
/src/aug.py
|
import albumentations as albu
def get_training_augmentation():
train_transform = [
# albu.RandomRotate90(p=1),
# albu.HorizontalFlip(p=0.25)
# albu.RGBShift(
# r_shift_limit=20,
# g_shift_limit=20,
# b_shift_limit=20,
# always_apply=False,
# p=0.5,
# ),
# albu.CLAHE(p=0.5)
# albu.IAAAdditiveGaussianNoise(p=1),
# albu.IAAPerspective(p=0.5),
# albu.OneOf(
# [albu.RandomBrightness(p=1), albu.RandomGamma(p=1)],
# p=0.9,
# ),
# albu.OneOf(
# [
# albu.IAASharpen(p=1),
# albu.Blur(blur_limit=3, p=1),
# albu.MotionBlur(blur_limit=3, p=1),
# ],
# p=0.9,
# ),
# albu.OneOf(
# [albu.RandomContrast(p=1), albu.HueSaturationValue(p=1)],
# p=0.9,
# ),
# albu.Normalize(
# mean=(0.355, 0.384, 0.359),
# std=(0.207, 0.202, 0.21),
# ),
]
return albu.Compose(train_transform)
def get_validation_augmentation():
test_transform = [
# albu.Normalize(
# mean=(0.355, 0.384, 0.359),
# std=(0.207, 0.202, 0.21),
# )
]
return albu.Compose(test_transform)
def get_test_augmentation():
test_transform = [
# albu.Normalize(
# mean=(0.355, 0.384, 0.359),
# std=(0.207, 0.202, 0.21),
# )
]
return albu.Compose(test_transform)
def to_tensor(x, **kwargs):
if len(x.shape) > 2:
return x.transpose(2, 0, 1).astype("float32")
else:
return x.astype("int64")
def get_preprocessing(preprocessing_fn):
"""Construct preprocessing transform
Args:
preprocessing_fn (callbale): data normalization function
(can be specific for each pretrained neural network)
Return:
transform: albumentations.Compose
"""
_transform = [
albu.Lambda(image=preprocessing_fn),
albu.Lambda(image=to_tensor, mask=to_tensor),
]
return albu.Compose(_transform)
|
{"/train_distillation.py": ["/src/dataset.py", "/src/logger.py"]}
|
23,696,205
|
xinrongl/huawei-rs
|
refs/heads/master
|
/tools/plot_logs.py
|
import sys
import matplotlib.pyplot as plt
logfile = sys.argv[1].split()[-1]
logfilename = logfile.split("/")[-1].rsplit(".log", 1)[0]
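# Each selected log line is split on " | "; fields 3-6 are expected to contain the
# train loss, validation loss, train IoU and validation IoU written by the trainer.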
def get_val(vals):
return [float(x.split(":")[1].strip()) for x in vals]
loss_train, loss_val = [], []
iou_train, iou_val = [], []
with open(logfile, "r") as f:
for line in f:
if "train iou" not in line:
continue
vals = line.strip("\n").split(" | ")
loss_train.append(vals[3])
loss_val.append(vals[4])
iou_train.append(vals[5])
iou_val.append(vals[6])
if all([loss_train, loss_val, iou_train, iou_val]):
loss_train = get_val(loss_train)
loss_val = get_val(loss_val)
iou_train = get_val(iou_train)
iou_val = get_val(iou_val)
fig, axs = plt.subplots(2, 1)
axs[0].plot(loss_train, label="train")
axs[0].plot(loss_val, label="val")
# start, end = axs[0].get_ylim()
# axs[0].set_yticks(arange(start, end + 0.1, 0.1))
# axs[0].set_yticks(arange(0, 1.1, 0.1))
axs[0].legend(loc="upper right")
axs[0].title.set_text(
f"train loss: {loss_train[-1]:.4f} | val loss: {loss_val[-1]:.4f}"
)
axs[1].plot(iou_train, label="train")
axs[1].plot(iou_val, label="val")
# start, end = axs[1].get_ylim()
# axs[1].set_yticks(arange(start, end + 0.1, 0.1))
axs[1].legend(loc="lower right")
axs[1].title.set_text(
f"train fwiou: {iou_train[-1]:.4f} | val fwiou: {iou_val[-1]:.4f}"
)
fig.suptitle(logfile.split("/")[-1], fontsize=16)
plt.tight_layout()
plt.savefig("logs/vis/%s.png" % logfilename)
else:
print("no record found.")
|
{"/train_distillation.py": ["/src/dataset.py", "/src/logger.py"]}
|
23,696,206
|
xinrongl/huawei-rs
|
refs/heads/master
|
/src/logger.py
|
import logging
class MyLogger(logging.Logger):
def __init__(self, level):
super().__init__(__name__)
self.level = level
self.setLevel(self.level)
def set_file_handler(self, filename, level=None):
formatter_file = logging.Formatter("%(asctime)s: %(message)s")
fh = logging.FileHandler(filename=filename)
fh.setLevel(level if level else self.level)
fh.setFormatter(formatter_file)
self.addHandler(fh)
def set_stream_handler(self, level=None):
formatter_stream = logging.Formatter("%(asctime)s: %(message)s")
sh = logging.StreamHandler()
sh.setLevel(level if level else self.level)
sh.setFormatter(formatter_stream)
self.addHandler(sh)
|
{"/train_distillation.py": ["/src/dataset.py", "/src/logger.py"]}
|
23,696,207
|
xinrongl/huawei-rs
|
refs/heads/master
|
/src/dataset.py
|
import cv2 as cv
import numpy as np
from torch.utils.data import Dataset
class CustomDataset(Dataset):
def __init__(
self,
data_dir,
split_filename,
classes=None,
augmentation=None,
preprocessing=None,
mode=None,
):
with open(split_filename) as f:
_ids = f.readlines()
self.filenames = list(map(lambda x: x.strip("\n") + ".png", _ids))
if mode == "test":
self.filenames = self.filenames[:30]
self.image_dir = f"{data_dir}/images"
self.label_dir = f"{data_dir}/labels"
self.class_values = classes
self.augmentation = augmentation
self.preprocessing = preprocessing
def __getitem__(self, i):
filename = self.filenames[i]
# read data
image = cv.imread(f"{self.image_dir}/{filename}")
image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
# image = image / 255
# image = image - np.array([0.485, 0.456, 0.406])
# image = image / np.array([0.229, 0.224, 0.225])
mask = cv.imread(f"{self.label_dir}/{filename}", -1)
# extract certain classes from mask (e.g. cars)
masks = [(mask == v) for v in self.class_values]
mask = np.stack(masks, axis=-1).astype("float")
# apply augmentations
if self.augmentation:
sample = self.augmentation(image=image, mask=mask)
image, mask = sample["image"], sample["mask"]
# apply preprocessing
if self.preprocessing:
sample = self.preprocessing(image=image, mask=mask)
image, mask = sample["image"], sample["mask"]
return image, mask
def __len__(self):
return len(self.filenames)
|
{"/train_distillation.py": ["/src/dataset.py", "/src/logger.py"]}
|
23,696,208
|
xinrongl/huawei-rs
|
refs/heads/master
|
/submission/customize_service.py
|
# -*- coding: utf-8 -*-
import time
from collections import OrderedDict
from io import BytesIO
import cv2 as cv
import log
import numpy as np
import segmentation_models_pytorch as smp
import torch
import torch.nn.functional as F
import torchvision.transforms as transforms
from metric.metrics_manager import MetricsManager
from model_service.pytorch_model_service import PTServingBaseService
from PIL import Image
from torch import Tensor
from torch.autograd import Variable
Image.MAX_IMAGE_PIXELS = 1000000000000000
logger = log.getLogger(__name__)
aux_params_dict = dict(pooling="avg", dropout=0.5, activation="sigmoid", classes=2)
def torch_rot90(x: Tensor):
"""
Rotate 4D image tensor by 90 degrees
:param x:
:return:
"""
return torch.rot90(x, k=1, dims=(2, 3))
def torch_rot180(x: Tensor):
"""
Rotate 4D image tensor by 180 degrees
:param x:
:return:
"""
return torch.rot90(x, k=2, dims=(2, 3))
def torch_rot270(x: Tensor):
"""
Rotate 4D image tensor by 270 degrees
:param x:
:return:
"""
return torch.rot90(x, k=3, dims=(2, 3))
def torch_transpose(x: Tensor):
"""
Transpose 4D image tensor by main image diagonal
:param x:
:return:
"""
return x.transpose(2, 3)
def torch_none(x: Tensor) -> Tensor:
"""
Return input argument without any modifications
:param x: input tensor
:return: x
"""
return x
def tta_inference(model, image):
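    # 8-way test-time augmentation: accumulate the prediction on the original image,
    # sigmoid-activated predictions for the 90/180/270-degree rotations, and the same
    # four views of the transposed image; the sum is scaled by 1/8 below.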
output, *_ = model(image)
for aug, deaug in zip(
[torch_rot90, torch_rot180, torch_rot270],
[torch_rot270, torch_rot180, torch_rot90],
):
tmp, *_ = model(aug(image))
x = deaug(tmp)
output += torch.sigmoid(x)
image = torch_transpose(image)
for aug, deaug in zip(
[torch_none, torch_rot90, torch_rot180, torch_rot270],
[torch_none, torch_rot270, torch_rot180, torch_rot90],
):
tmp, *_ = model(aug(image))
x = deaug(tmp)
output += torch.sigmoid(torch_transpose(x))
one_over_8 = float(1.0 / 8.0)
out_l_tta = output * one_over_8
return out_l_tta
class ImageClassificationService(PTServingBaseService):
def __init__(self, model_name, model_path):
self.model_name = model_name
self.model_path = model_path
self.model = smp.Unet(
encoder_name="se_resnext101_32x4d",
encoder_weights=None,
classes=2,
activation="sigmoid",
decoder_attention_type="scse",
decoder_channels=[512, 256, 128, 64, 32],
decoder_use_batchnorm=True,
aux_params=aux_params_dict,
)
self.use_cuda = False
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
print("Using GPU for inference")
checkpoint = torch.load(self.model_path)
self.use_cuda = True
self.model = self.model.to(device)
else:
print("Using CPU for inference")
checkpoint = torch.load(self.model_path, map_location="cpu")
# state_dict = OrderedDict()
# for key, value in checkpoint["state_dict"].items():
# tmp = key[7:]
# state_dict[tmp] = value
# self.model.load_state_dict(state_dict)
self.model.load_state_dict(checkpoint["state_dict"])
self.model.eval()
def _preprocess(self, data):
preprocessed_data = {}
for k, v in data.items():
for file_name, file_content in v.items():
img = Image.open(file_content)
img = np.array(img)
preprocessed_data[k] = img
return preprocessed_data
def _inference(self, data):
image = data["input_img"]
data = image
ori_x, ori_y = image.shape[0], image.shape[1]
target_l = 1024
stride = 1024
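        # Sliding-window inference: normalize with ImageNet statistics, zero-pad so the
        # height and width fit whole 1024-pixel tiles, run each tile through tta_inference,
        # keep class-1 probabilities above 0.75, crop back to the original size, and encode
        # each row as start/end runs of background (0) pixels.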
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if data.max() > 1:
data = data / 255.0
data = data - np.array([0.485, 0.456, 0.406])
data = data / np.array([0.229, 0.224, 0.225])
# data = data - np.array([0.444, 0.425, 0.415])
# data = data / np.array([0.228, 0.221, 0.231])
h, w = image.shape[0], image.shape[1]
new_w, new_h = w, h
if (w - target_l) % stride:
new_w = ((w - target_l) // stride + 1) * stride + target_l
if (h - target_l) % stride:
new_h = ((h - target_l) // stride + 1) * stride + target_l
data = cv.copyMakeBorder(
data, 0, new_h - h, 0, new_w - w, cv.BORDER_CONSTANT, 0
)
data = data.transpose(2, 0, 1)
c, x, y = data.shape
label = np.zeros((x, y))
x_num = (x // target_l + 1) if x % target_l else x // target_l
y_num = (y // target_l + 1) if y % target_l else y // target_l
for i in range(x_num):
for j in range(y_num):
x_s, x_e = i * target_l, (i + 1) * target_l
y_s, y_e = j * target_l, (j + 1) * target_l
x_e = min(x_e, x)
y_e = min(y_e, y)
img = data[:, x_s:x_e, y_s:y_e]
img = img[np.newaxis, :, :, :].astype(np.float32)
img = torch.from_numpy(img)
img = Variable(img.to(device))
out_l = tta_inference(self.model, img)
out_l = out_l.cpu().data.numpy()
out_l = (out_l[0, 1, :, :] > 0.75).astype(np.int8)
out_l = np.argmax(out_l, axis=1)[0]
label[x_s:x_e, y_s:y_e] = out_l.astype(np.int8)
# _label = label.astype(np.int8).tolist()
label = label[:ori_x, :ori_y]
_label = label.astype(np.int8).tolist()
_len, __len = len(_label), len(_label[0])
o_stack = []
for _ in _label:
out_s = {"s": [], "e": []}
j = 0
while j < __len:
if _[j] == 0:
out_s["s"].append(str(j))
while j < __len and _[j] == 0:
j += 1
out_s["e"].append(str(j))
j += 1
o_stack.append(out_s)
result = {"result": o_stack}
return result
def _postprocess(self, data):
return data
def inference(self, data):
pre_start_time = time.time()
data = self._preprocess(data)
infer_start_time = time.time()
# Update preprocess latency metric
pre_time_in_ms = (infer_start_time - pre_start_time) * 1000
logger.info("preprocess time: " + str(pre_time_in_ms) + "ms")
if self.model_name + "_LatencyPreprocess" in MetricsManager.metrics:
MetricsManager.metrics[self.model_name + "_LatencyPreprocess"].update(
pre_time_in_ms
)
data = self._inference(data)
infer_end_time = time.time()
infer_in_ms = (infer_end_time - infer_start_time) * 1000
logger.info("infer time: " + str(infer_in_ms) + "ms")
data = self._postprocess(data)
# Update inference latency metric
post_time_in_ms = (time.time() - infer_end_time) * 1000
logger.info("postprocess time: " + str(post_time_in_ms) + "ms")
if self.model_name + "_LatencyInference" in MetricsManager.metrics:
MetricsManager.metrics[self.model_name + "_LatencyInference"].update(
post_time_in_ms
)
# Update overall latency metric
if self.model_name + "_LatencyOverall" in MetricsManager.metrics:
MetricsManager.metrics[self.model_name + "_LatencyOverall"].update(
pre_time_in_ms + post_time_in_ms
)
logger.info(
"latency: " + str(pre_time_in_ms + infer_in_ms + post_time_in_ms) + "ms"
)
data["latency_time"] = pre_time_in_ms + infer_in_ms + post_time_in_ms
return data
|
{"/train_distillation.py": ["/src/dataset.py", "/src/logger.py"]}
|
23,696,209
|
xinrongl/huawei-rs
|
refs/heads/master
|
/train_distillation.py
|
# (1) train segmentation model from the scratch using deeplabv3plus and efficientnet-b3 as encoder.
# cd naic
# python train.py --encoder resnext50_32x4d -w imagenet --arch unet -b 4 -lr 5e-5 -wd 5e-6 --num_workers 12 --num_epoch 100 --parallel
import argparse
import shutil
from collections import OrderedDict
from datetime import datetime
from pathlib import Path
import segmentation_models_pytorch as smp
import torch
import torch.nn
import yaml
from torch import optim
from torch.utils.data import DataLoader
from src import aug
from src.dataset import CustomDataset
from src import metrics
from src.optimizer import RAdam
from src.logger import MyLogger
from src.trainer import TrainDistillEpoch
TIMESTAMP = datetime.now()
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
torch.cuda.empty_cache()
torch.backends.cudnn.benchmark = True
with open("cfgs/cfgs.yaml", "r") as f:
cfgs = yaml.load(f, yaml.FullLoader)
parser = argparse.ArgumentParser("Train segmentation model with SMP api.")
parser.add_argument("--encoder", default="efficientnet-b5")
parser.add_argument(
"-w", "--weight", default="ssl", help="Encoder pretrained weight", required=True
)
parser.add_argument("--activation", default="sigmoid")
parser.add_argument(
"--arch",
required=True,
help="model arch: "
+ " | ".join(
["unet", "linkednet", "fpn", "pspnet", "deeplabv3", "deeplabv3plus", "pan"]
),
type=lambda arch: arch.lower(),
)
parser.add_argument("-depth", "--encoder_depth", type=int, default=5)
parser.add_argument("-b", "--batch_size", type=int, default=4)
parser.add_argument("-lr", "--learning_rate", type=float, default=5e-5)
parser.add_argument("-wd", "--weight_decay", type=float, default=5e-4)
parser.add_argument("--momentum", type=float, default=0.9)
parser.add_argument("--threshold", type=float, default=0.8)
parser.add_argument(
"--save_threshold",
type=float,
default=0.5,
help="Save model if model score greater than threshold",
)
parser.add_argument("--patience", default=5, type=int)
parser.add_argument("--num_workers", type=int, default=12)
parser.add_argument("--num_epoch", default=10, type=int)
parser.add_argument("--loglevel", default="INFO")
parser.add_argument(
"--resume",
default="",
type=str,
metavar="PATH",
help="path to latest checkpoint (default: none)",
)
parser.add_argument(
"--load",
default="",
type=str,
metavar="PATH",
help="path to latest checkpoint (default: none)",
)
parser.add_argument(
"--parallel", action="store_true", help="Use multi-gpus for training"
)
parser.add_argument("--test", action="store_true", help="Test code use small dataset")
args, _ = parser.parse_known_args()
aux_params_dict = dict(pooling="max", dropout=0.5, activation="softmax", classes=2)
arch_dict = {
"unet": smp.Unet(
encoder_name=args.encoder,
encoder_weights=args.weight,
classes=2,
activation=args.activation,
decoder_attention_type="scse",
decoder_use_batchnorm=True,
aux_params=aux_params_dict,
),
"linknet": smp.Linknet(
encoder_name=args.encoder,
encoder_weights=args.weight,
classes=2,
activation=args.activation,
),
"fpn": smp.FPN(
encoder_name=args.encoder,
encoder_weights=args.weight,
classes=2,
activation=args.activation,
),
"pspnet": smp.PSPNet(
encoder_name=args.encoder,
encoder_weights=args.weight,
classes=2,
activation=args.activation,
),
"deeplabv3": smp.DeepLabV3(
encoder_name=args.encoder,
encoder_weights=args.weight,
classes=2,
activation=args.activation,
),
"deeplabv3plus": smp.DeepLabV3Plus(
encoder_name=args.encoder,
encoder_weights=args.weight,
classes=2,
activation=args.activation,
aux_params=aux_params_dict,
),
"pan": smp.PAN(
encoder_name=args.encoder,
encoder_weights=args.weight,
classes=2,
activation=args.activation,
aux_params=aux_params_dict,
),
}
def save_checkpoint(state, filename):
torch.save(state, filename)
def save_best_checkpoint(max_score, checkpoint_path):
best_score_suffix = f"{max_score:.4f}.pth"
pth_files = checkpoint_path.glob("*.pth")
for pth_file in pth_files:
if pth_file.name.endswith(best_score_suffix):
            shutil.copy(pth_file, checkpoint_path.joinpath("model_best.pth"))
break
def main():
logger.info(f"Loading images from {cfgs['data_dir']}")
preprocessing_fn = smp.encoders.get_preprocessing_fn(args.encoder, args.weight)
train_dataset = CustomDataset(
data_dir=cfgs["data_dir"],
split_filename=cfgs["split_filename_train"],
classes=[0, 1],
augmentation=aug.get_training_augmentation(),
preprocessing=aug.get_preprocessing(preprocessing_fn),
mode="test" if args.test else None,
)
valid_dataset = CustomDataset(
data_dir=cfgs["data_dir"],
split_filename=cfgs["split_filename_val"],
classes=[0, 1],
augmentation=aug.get_validation_augmentation(),
preprocessing=aug.get_preprocessing(preprocessing_fn),
mode="test" if args.test else None,
)
train_loader = DataLoader(
train_dataset,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.num_workers,
)
valid_loader = DataLoader(
valid_dataset,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.num_workers,
)
checkpoints = None
if args.resume:
checkpoints = torch.load(args.resume)
state_dict = OrderedDict()
for key, value in checkpoints["state_dict"].items():
tmp = key[7:]
state_dict[tmp] = value
model = arch_dict[checkpoints["arch"]]
model.load_state_dict(state_dict)
logger.info(
f"=> loaded checkpoint '{args.resume}' (epoch {args.resume.split('_')[-2]})"
)
else:
model = arch_dict[args.arch]
# teacher model
model_t = smp.Unet(
encoder_name="resnext101_32x4d",
encoder_weights="ssl",
classes=2,
activation=args.activation,
decoder_attention_type="scse",
decoder_use_batchnorm=True,
aux_params=aux_params_dict,
)
optimizer = RAdam(
model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay
)
if args.parallel:
model = torch.nn.DataParallel(model).cuda()
model_t = torch.nn.DataParallel(model_t).cuda()
metric = [
metrics.mIoU(threshold=args.threshold),
]
# loss = smp.utils.losses.CrossEntropyLoss()
# loss = smp.utils.losses.BCELoss()
loss = smp.utils.losses.JaccardLoss()
# loss = smp.utils.losses.DiceLoss()
loss2 = torch.nn.KLDivLoss()
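    # Distillation losses: JaccardLoss supervises the student against the ground-truth
    # masks, while the KL-divergence loss is handed to TrainDistillEpoch (src/trainer.py),
    # presumably to match the student's outputs to the teacher's soft predictions.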
scheduler = optim.lr_scheduler.ReduceLROnPlateau(
optimizer, mode="min", factor=0.5, patience=args.patience, verbose=True
)
# train_epoch = smp.utils.train.TrainEpoch(
# model,
# loss=loss,
# metrics=metric,
# optimizer=optimizer,
# device=DEVICE,
# verbose=True,
# )
train_epoch = TrainDistillEpoch(
model=model,
model_t=model_t,
loss=loss,
loss2=loss2,
metrics=metric,
optimizer=optimizer,
device=DEVICE,
verbose=True,
)
valid_epoch = smp.utils.train.ValidEpoch(
model,
loss=loss,
metrics=metric,
device=DEVICE,
verbose=True,
)
checkpoint_path = Path(
f"{cfgs['checkpoint_dir']}/{args.arch}_{args.encoder}/{TIMESTAMP:%Y%m%d%H%M}"
)
if not args.test:
checkpoint_path.mkdir(parents=True, exist_ok=True)
max_score = checkpoints["best_iou"] if args.resume else 0.0
start_epoch = checkpoints["epoch"] + 1 if args.resume else 0
end_epoch = start_epoch + args.num_epoch
logger.info(f"Current best score: {max_score:.4f}")
# optimizer.param_groups[0]["lr"] = args.learning_rate
logger.info(f"Start learning rate: {optimizer.param_groups[0]['lr']:f}")
for epoch in range(start_epoch, end_epoch):
train_logs = train_epoch.run(train_loader)
valid_logs = valid_epoch.run(valid_loader)
train_loss, train_iou = train_logs.values()
val_loss, val_iou = valid_logs.values()
logger.info(
f"epoch [{epoch:03d} | {end_epoch:03d}] | lr: {optimizer.param_groups[0]['lr']:f} | train loss: {train_loss:.4f} | val loss: {val_loss:.4f} | train iou: {train_iou:.4f} | val iou: {val_iou:.4f}"
)
scheduler.step(val_loss)
if all([max_score < val_iou, val_iou > args.save_threshold, not args.test]):
max_score = val_iou
save_checkpoint(
{
"epoch": epoch,
"arch": args.arch,
"encoder": args.encoder,
"encoder_weight": args.weight,
"state_dict": model.state_dict(),
"best_iou": max_score,
"optimizer": optimizer.state_dict(),
"activation": args.activation,
},
checkpoint_path.joinpath(f"epoch_{epoch}_{max_score:.4f}.pth"),
)
logger.info(f"Save checkpoint at {epoch}.")
save_best_checkpoint(max_score, checkpoint_path)
if __name__ == "__main__":
logger_dir = Path(f"{cfgs['log_dir']}/{args.arch}_{args.encoder}")
logger_dir.mkdir(parents=True, exist_ok=True)
logger = MyLogger(args.loglevel)
logger.set_stream_handler()
if not args.test:
logger.set_file_handler(f"{logger_dir}/{TIMESTAMP:%Y%m%d%H%M}.log")
for arg, val in sorted(vars(args).items()):
logger.info(f"{arg}: {val}")
logger.info("\n")
main()
|
{"/train_distillation.py": ["/src/dataset.py", "/src/logger.py"]}
|
23,696,210
|
xinrongl/huawei-rs
|
refs/heads/master
|
/tools/train_val_split.py
|
import random
from pathlib import Path, PurePath
DATA_DIR = Path("/home/xinrong/huawei-rs/data/1024_1024")
TRAIN_SIZE = 0.9
def split(data_dir, train_size):
if not isinstance(data_dir, PurePath):
data_dir = Path(data_dir)
image_id = sorted(data_dir.joinpath("images").glob("*"))
target_id = sorted(data_dir.joinpath("labels").glob("*"))
assert len(image_id) == len(
target_id
), "number of images not equal to number of target"
random.shuffle(image_id)
_ids = map(lambda x: x.name.split(".")[0], image_id)
train_ids, val_ids = [], []
for _id in _ids:
if random.random() <= train_size:
train_ids.append(_id)
else:
val_ids.append(_id)
return train_ids, val_ids
def main():
f1 = open(DATA_DIR.joinpath("train.txt"), "w")
f2 = open(DATA_DIR.joinpath("val.txt"), "w")
train_ids, val_ids = split(data_dir=DATA_DIR, train_size=TRAIN_SIZE)
for train_id in train_ids:
f1.write(train_id + "\n")
for val_id in val_ids:
f2.write(val_id + "\n")
f1.close()
f2.close()
if __name__ == "__main__":
main()
|
{"/train_distillation.py": ["/src/dataset.py", "/src/logger.py"]}
|
23,696,211
|
xinrongl/huawei-rs
|
refs/heads/master
|
/tools/cut_data.py
|
"""
ๆญคไปฃ็ ๅฐ็ปๅฎ็ไธคๅผ ๅพ็ๅๅ
ถๆ ็ญพๅๅๆ1024*1024็ๅฐๅพ๏ผๆญฅ้ฟ้ๆๅฏ่ช่ก่ฐๆด
้ๅไผ้ๆบๅๆ่ฎญ็ป้ๅ้ช่ฏ้๏ผๆฏไพ้ๆไบฆๅฏ้ๆบ่ฐๆด
"""
import os
import numpy as np
from PIL import Image
import cv2 as cv
from tqdm import tqdm
import random
import shutil
Image.MAX_IMAGE_PIXELS = 1000000000000000
TARGET_W, TARGET_H = 1024, 1024
STEP = 992
def cut_images(image_name, image_path, label_path, save_dir, is_show=True):
    # initialize output directories
image_save_dir = os.path.join(save_dir, "images/")
if not os.path.exists(image_save_dir):
os.makedirs(image_save_dir)
label_save_dir = os.path.join(save_dir, "labels/")
if not os.path.exists(label_save_dir):
os.makedirs(label_save_dir)
if is_show:
label_show_save_dir = os.path.join(save_dir, "labels_show/")
if not os.path.exists(label_show_save_dir):
os.makedirs(label_show_save_dir)
target_w, target_h = TARGET_W, TARGET_H
overlap = target_h // 8
stride = target_h - overlap
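    # Tiles overlap by 1/8 of the tile size (128 px), giving an effective stride of 896 px;
    # the module-level STEP constant above is not used in this function.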
image = np.asarray(Image.open(image_path))
label = np.asarray(Image.open(label_path))
h, w = image.shape[0], image.shape[1]
print("ๅๅงๅคงๅฐ: ", w, h)
    new_w, new_h = w, h
    if (w - target_w) % stride:
        new_w = ((w - target_w) // stride + 1) * stride + target_w
    if (h - target_h) % stride:
        new_h = ((h - target_h) // stride + 1) * stride + target_h
image = cv.copyMakeBorder(
image, 0, new_h - h, 0, new_w - w, cv.BORDER_CONSTANT, value=0
)
label = cv.copyMakeBorder(
label, 0, new_h - h, 0, new_w - w, cv.BORDER_CONSTANT, value=1
)
h, w = image.shape[0], image.shape[1]
print("ๅกซๅ
่ณๆดๆฐๅ: ", w, h)
def crop(cnt, crop_image, crop_label, is_show=is_show):
_name = image_name.split(".")[0]
image_save_path = os.path.join(
image_save_dir, _name + "_" + str(cnt[0]) + "_" + str(cnt[1]) + ".png"
)
label_save_path = os.path.join(
label_save_dir, _name + "_" + str(cnt[0]) + "_" + str(cnt[1]) + ".png"
)
label_show_save_path = os.path.join(
label_show_save_dir, _name + "_" + str(cnt[0]) + str(cnt[1]) + ".png"
)
if not np.unique(crop_image).size == 1: # remove pure image
cv.imwrite(image_save_path, crop_image)
cv.imwrite(label_save_path, crop_label)
if is_show:
cv.imwrite(label_show_save_path, crop_label * 255)
h, w = image.shape[0], image.shape[1]
for i in tqdm(range((w - target_w) // stride + 1)):
for j in range((h - target_h) // stride + 1):
topleft_x = i * stride
topleft_y = j * stride
crop_image = image[
topleft_y : topleft_y + target_h, topleft_x : topleft_x + target_w
]
crop_label = label[
topleft_y : topleft_y + target_h, topleft_x : topleft_x + target_w
]
crop((i, j), crop_image, crop_label)
# os.remove(image_path)
def get_train_val(data_dir):
all_images_dir = os.path.join(data_dir, "images/")
all_labels_dir = os.path.join(data_dir, "labels/")
train_imgs_dir = os.path.join(data_dir, "train/images/")
if not os.path.exists(train_imgs_dir):
os.makedirs(train_imgs_dir)
val_imgs_dir = os.path.join(data_dir, "val/images/")
if not os.path.exists(val_imgs_dir):
os.makedirs(val_imgs_dir)
train_labels_dir = os.path.join(data_dir, "train/labels/")
if not os.path.exists(train_labels_dir):
os.makedirs(train_labels_dir)
val_labels_dir = os.path.join(data_dir, "val/labels/")
if not os.path.exists(val_labels_dir):
os.makedirs(val_labels_dir)
for name in os.listdir(all_images_dir):
image_path = os.path.join(all_images_dir, name)
label_path = os.path.join(all_labels_dir, name)
if random.randint(0, 10) < 2:
image_save = os.path.join(val_imgs_dir, name)
label_save = os.path.join(val_labels_dir, name)
else:
image_save = os.path.join(train_imgs_dir, name)
label_save = os.path.join(train_labels_dir, name)
shutil.move(image_path, image_save)
shutil.move(label_path, label_save)
total_nums = len(os.listdir(all_images_dir))
train_nums = len(os.listdir(train_imgs_dir))
val_nums = len(os.listdir(val_imgs_dir))
print("all: " + str(total_nums))
print("train: " + str(train_nums))
print("val: " + str(val_nums))
if __name__ == "__main__":
data_dir = "/home/xinrong/huawei-rs/data/raw"
img_name1 = "382.png"
img_name2 = "182.png"
label_name1 = "382_label.png"
label_name2 = "182_label.png"
cut_images(
img_name1,
os.path.join(data_dir, img_name1),
os.path.join(data_dir, label_name1),
data_dir,
)
cut_images(
img_name2,
os.path.join(data_dir, img_name2),
os.path.join(data_dir, label_name2),
data_dir,
)
# get_train_val(data_dir)
|
{"/train_distillation.py": ["/src/dataset.py", "/src/logger.py"]}
|
23,759,104
|
fullyalive/project_wecode
|
refs/heads/test
|
/wecode/lectures/admin.py
|
from django.contrib import admin
from django_summernote.admin import SummernoteModelAdmin
from .models import Lecture, LectureLike, LectureComment, LectureImages
class PhotoInline(admin.StackedInline):
model = LectureImages
extra = 2
@admin.register(Lecture)
class LectureAdmin(SummernoteModelAdmin):
inlines = [PhotoInline]
list_display = ['title', 'creator', 'location', 'short_description', 'price']
# fields = []
@admin.register(LectureImages)
class PhotoAdmin(admin.ModelAdmin):
list_display = ('id', 'upload_date', 'lecture')
@admin.register(LectureLike)
class LectureLikeAdmin(admin.ModelAdmin):
pass
@admin.register(LectureComment)
class LectureCommentAdmin(admin.ModelAdmin):
pass
|
{"/wecode/views.py": ["/wecode/users/serializers.py"], "/wecode/lectures/admin.py": ["/wecode/lectures/models.py"], "/wecode/users/serializers.py": ["/wecode/posts/models.py"], "/wecode/banners/admin.py": ["/wecode/banners/models.py"]}
|
23,759,105
|
fullyalive/project_wecode
|
refs/heads/test
|
/wecode/notifications/serializers.py
|
from rest_framework import serializers
from . import models
from wecode.users import models as user_models
class FeedUserSerializer(serializers.ModelSerializer):
class Meta:
model = user_models.User
fields = (
'id',
'username'
)
class NotificationSerializer(serializers.ModelSerializer):
creator = FeedUserSerializer(read_only=True)
class Meta:
model = models.Notification
fields = '__all__'
|
{"/wecode/views.py": ["/wecode/users/serializers.py"], "/wecode/lectures/admin.py": ["/wecode/lectures/models.py"], "/wecode/users/serializers.py": ["/wecode/posts/models.py"], "/wecode/banners/admin.py": ["/wecode/banners/models.py"]}
|
23,759,106
|
fullyalive/project_wecode
|
refs/heads/test
|
/wecode/posts/urls.py
|
from django.urls import path
from . import views
app_name = "posts"
urlpatterns = [
path("", view=views.Post_list_view.as_view(), name="list"),
path("popular/", view=views.Post_popular.as_view(), name="popular"),
path("<post_id>/", view=views.Post_detail.as_view(), name="detail"),
path("<post_id>/likes/", view=views.Likes.as_view(), name="like post"),
path("<post_id>/unlikes/", view=views.Unlikes.as_view(), name="unlike post"),
path("<post_id>/comments/", view=views.Comments.as_view(), name="comments"),
path("<post_id>/comments/<comment_id>/", view=views.CommentDetail.as_view(), name="comment_detail"),
path("<post_id>/comments/<comment_id>/recomments/", view=views.Recomments.as_view(), name="recomments"),
path("<post_id>/comments/<comment_id>/recomments/<recomment_id>/",
view=views.ReCommentDetail.as_view(), name="recomment_detail"),
path("<user_id>/answers/", view=views.AnswerList.as_view(), name="answer_list"),
]
|
{"/wecode/views.py": ["/wecode/users/serializers.py"], "/wecode/lectures/admin.py": ["/wecode/lectures/models.py"], "/wecode/users/serializers.py": ["/wecode/posts/models.py"], "/wecode/banners/admin.py": ["/wecode/banners/models.py"]}
|
23,759,107
|
fullyalive/project_wecode
|
refs/heads/test
|
/wecode/banners/serializers.py
|
from rest_framework import serializers
from . import models
from wecode.users import models as user_models
class FeedUserSerializer(serializers.ModelSerializer):
class Meta:
model = user_models.User
fields = (
'id',
'username',
'profile_image'
)
class BannerSerializer(serializers.ModelSerializer):
creator = FeedUserSerializer(read_only=True)
class Meta:
model = models.Banner
fields = ('id', 'description', 'short_description', 'location', 'creator',
'bannerImage', 'title', 'updated_at', 'url','comma_price')
|
{"/wecode/views.py": ["/wecode/users/serializers.py"], "/wecode/lectures/admin.py": ["/wecode/lectures/models.py"], "/wecode/users/serializers.py": ["/wecode/posts/models.py"], "/wecode/banners/admin.py": ["/wecode/banners/models.py"]}
|
23,759,108
|
fullyalive/project_wecode
|
refs/heads/test
|
/wecode/posts/admin.py
|
from django.contrib import admin
from django_summernote.admin import SummernoteModelAdmin
from . import models
@admin.register(models.Post)
class PostAdmin(SummernoteModelAdmin):
# list_filter = ('post_type')
list_display = ['id','title', 'post_type', 'creator',
'view_count','created_at']
fields = ['title', 'post_type', 'creator', 'description',
'view_count', 'isImportant']
list_display_links = ('title','creator')
@admin.register(models.PostLike)
class LikeAdmin(admin.ModelAdmin):
pass
@admin.register(models.PostComment)
class CommentAdmin(SummernoteModelAdmin):
pass
|
{"/wecode/views.py": ["/wecode/users/serializers.py"], "/wecode/lectures/admin.py": ["/wecode/lectures/models.py"], "/wecode/users/serializers.py": ["/wecode/posts/models.py"], "/wecode/banners/admin.py": ["/wecode/banners/models.py"]}
|
23,759,109
|
fullyalive/project_wecode
|
refs/heads/test
|
/wecode/studygroups/admin.py
|
from django.contrib import admin
from django_summernote.admin import SummernoteModelAdmin
from .models import StudyGroup, StudyLike, StudyComment, StudyImages
class PhotoInline(admin.StackedInline):
model = StudyImages
extra = 2
@admin.register(StudyGroup)
class StudyAdmin(SummernoteModelAdmin):
inlines = [PhotoInline]
list_display = ['title', 'creator', 'location', 'short_description']
@admin.register(StudyImages)
class PhotoAdmin(admin.ModelAdmin):
list_display = ('id', 'upload_date')
@admin.register(StudyLike)
class StudyLikeAdmin(admin.ModelAdmin):
pass
@admin.register(StudyComment)
class StudyCommentAdmin(admin.ModelAdmin):
pass
|
{"/wecode/views.py": ["/wecode/users/serializers.py"], "/wecode/lectures/admin.py": ["/wecode/lectures/models.py"], "/wecode/users/serializers.py": ["/wecode/posts/models.py"], "/wecode/banners/admin.py": ["/wecode/banners/models.py"]}
|
23,759,110
|
fullyalive/project_wecode
|
refs/heads/test
|
/wecode/lectures/serializers.py
|
from rest_framework import serializers
from . import models
from wecode.users import models as user_models
class FeedUserSerializer(serializers.ModelSerializer):
class Meta:
model = user_models.User
fields = (
'id',
'username',
'name',
'profile_image',
'bio'
)
class CommentSerializer(serializers.ModelSerializer):
creator = FeedUserSerializer(read_only=True)
class Meta:
model = models.LectureComment
fields = (
'id',
'message',
'creator',
'created_time_mdhm',
'parent',
'groupNumber',
'groupOrder',
'recomment_count',
)
class LikeSerializer(serializers.ModelSerializer):
class Meta:
model = models.LectureLike
fields = '__all__'
class LectureSerializer(serializers.ModelSerializer):
creator = FeedUserSerializer(read_only=True)
# lecture_comments = CommentSerializer(read_only=True, many=True)
is_liked = serializers.SerializerMethodField()
class Meta:
model = models.Lecture
fields = ('id', 'description',
'short_description', 'location', 'creator',
'lectureImage', 'title', 'updated_at',
# 'lecture_comments',
'natural_time', 'is_liked', 'like_count', 'attendants',
'comma_price', 'start_date', 'end_date', 'start_time', 'end_time', 'day1', 'day2', 'deadline', 'deadline_date'
)
def get_is_liked(self, obj):
if 'request' in self.context:
request = self.context['request']
queryset = obj.lecture_likes.all()
for data in queryset:
if data.creator.id == request.user.id:
return True
return False
return False
@staticmethod
def setup_eager_loading(queryset):
""" Perform necessary eager loading of data. """
# select_related for "to-one" relationships
queryset = queryset.select_related('creator')
# prefetch_related for "to-many" relationships
queryset = queryset.prefetch_related(
'lecture_comments', 'lecture_likes', 'lecture_likes__creator')
return queryset
class LectureDetailSerializer(serializers.ModelSerializer):
creator = FeedUserSerializer(read_only=True)
lecture_comments = CommentSerializer(read_only=True, many=True)
is_liked = serializers.SerializerMethodField()
attend_users = FeedUserSerializer(read_only=True, many=True)
wish_users = FeedUserSerializer(read_only=True, many=True)
class Meta:
model = models.Lecture
fields = ('id', 'description',
'short_description', 'location', 'creator',
'lectureImage', 'title', 'updated_at', 'lecture_comments',
'natural_time', 'is_liked', 'like_count',
'comma_price', 'start_date', 'end_date', 'start_time', 'end_time', 'day1', 'day2',
'attend_users', 'wish_users',
'career1', 'career2', 'contents', 'curriculum1', 'curriculum2', 'attendants', 'url', 'deadline_date', 'deadline'
)
def get_is_liked(self, obj):
if 'request' in self.context:
request = self.context['request']
queryset = obj.lecture_likes.all()
for data in queryset:
if data.creator.id == request.user.id:
return True
return False
return False
class UserUseLectureSerializer(serializers.ModelSerializer):
creator = FeedUserSerializer(read_only=True)
class Meta:
model = models.Lecture
fields = ('id', 'description', 'short_description', 'location', 'creator',
'lectureImage', 'title', 'updated_at', 'natural_time', 'attendants',
'comma_price', 'start_date', 'end_date', 'start_time', 'end_time', 'day1', 'day2', 'deadline', 'deadline_date')
class TestSerializer(serializers.ModelSerializer):
class Meta:
model = models.Lecture
fields = ('id','description')
|
{"/wecode/views.py": ["/wecode/users/serializers.py"], "/wecode/lectures/admin.py": ["/wecode/lectures/models.py"], "/wecode/users/serializers.py": ["/wecode/posts/models.py"], "/wecode/banners/admin.py": ["/wecode/banners/models.py"]}
|
23,759,111
|
fullyalive/project_wecode
|
refs/heads/test
|
/wecode/posts/views.py
|
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status, generics
from rest_framework.pagination import PageNumberPagination
from rest_framework.filters import SearchFilter
from . import models, serializers
from wecode.users import serializers as user_serializers
from wecode.users import models as user_models
from wecode.notifications import views as notification_views
from django.shortcuts import get_object_or_404
class AnswerList(APIView):
pagination_class = PageNumberPagination
def get(self, request, user_id, *args, **kwargs):
qs = models.Post.objects.filter(creator__id=user_id)
serializer = serializers.PostAnswerTitleSerializer(qs, many=True)
return Response(data=serializer.data, status=status.HTTP_200_OK)
class Post_list_view(generics.ListCreateAPIView):
serializer_class = serializers.PostSerializer
filter_backends = [SearchFilter]
search_fields = ['title', 'description', 'creator__username']
pagination_class = PageNumberPagination
def get_queryset(self):
queryset = models.Post.objects.prefetch_related('post_comments', 'post_comments__creator', 'post_likes')
queryset = queryset.select_related('creator')
post_type = self.request.query_params.get('type', None)
if post_type is not None:
queryset = queryset.filter(post_type=post_type)
return queryset
def get_serializer_class(self):
if self.request.method == 'POST':
return serializers.PostDetailSerializer
return serializers.PostSerializer
def get_serializer_context(self):
return {'request': self.request}
def perform_create(self, serializer):
serializer.save(creator=self.request.user)
class Post_popular(APIView):
def get(self, request, format=None):
queryset = models.Post.objects.prefetch_related('post_comments', 'post_comments__creator','post_likes')
queryset = queryset.select_related('creator')
qna_post = queryset.filter(post_type='qna')[:6]
free_post = queryset.filter(post_type='free')[:6]
ask_post = queryset.filter(post_type='ask')[:6]
queryset = [x for x in qna_post] + [y for y in free_post] + [z for z in ask_post]
serializer = serializers.PostSerializer(queryset, many=True, context={'request':request})
return Response(data=serializer.data, status=status.HTTP_200_OK)
class Post_detail(APIView):
def find_own_post(self, post_id, user):
try:
post = models.Post.objects.get(id=post_id, creator=user)
return post
except models.Post.DoesNotExist:
return None
def get(self, request, post_id, format=None):
        post = models.Post.objects.prefetch_related(
            'post_comments', 'post_comments__post', 'post_comments__creator',
            'post_likes', 'post_likes__creator'
        ).select_related('creator').filter(id=post_id).first()
if post is None:
return Response(status=status.HTTP_400_BAD_REQUEST)
post.view_count = post.view_count + 1
post.save()
serializer = serializers.PostDetailSerializer(post, context={'request':request})
return Response(data=serializer.data, status=status.HTTP_200_OK)
def put(self, request, post_id, format=None):
user = request.user
post = self.find_own_post(post_id, user)
if post is None:
return Response(status=status.HTTP_400_BAD_REQUEST)
serializer = serializers.PostSerializer(
post, data=request.data, partial=True
)
if serializer.is_valid():
serializer.save(creator=user)
return Response(data=serializer.data, status=status.HTTP_302_FOUND)
else:
return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, post_id, format=None):
user = request.user
post = self.find_own_post(post_id, user)
if post is None:
return Response(status=status.HTTP_400_BAD_REQUEST)
post.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class Likes(APIView):
def get(self, request, post_id, format=None):
likes = models.PostLike.objects.filter(post__id=post_id)
like_creator_ids = likes.values('creator_id')
users = user_models.User.objects.filter(id__in=like_creator_ids)
serializer = serializers.FeedUserSerializer(users, many=True, context={'request': request})
return Response(data=serializer.data, status=status.HTTP_200_OK)
def post(self, request, post_id, format=None):
user = request.user
try:
found_post = models.Post.objects.get(id=post_id)
except models.Post.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
try:
preexisting_like = models.PostLike.objects.get(
creator=user,
post=found_post
)
return Response(status=status.HTTP_302_FOUND)
except models.PostLike.DoesNotExist:
new_like = models.PostLike.objects.create(
creator=user,
post=found_post
)
new_like.save()
return Response(status=status.HTTP_201_CREATED)
class Unlikes(APIView):
def delete(self, request, post_id, format=None):
user = request.user
try:
preexisting_like = models.PostLike.objects.get(
creator=user,
post__id=post_id
)
preexisting_like.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
except models.PostLike.DoesNotExist:
return Response(status=status.HTTP_302_FOUND)
class Comments(APIView):
def get(self, request, post_id, format=None):
try:
comments = models.PostComment.objects.filter(post__id=post_id)
serializer = serializers.CommentSerializer(comments, many=True)
return Response(data=serializer.data, status=status.HTTP_200_OK)
except models.Post.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
def post(self, request, post_id, format=None):
user = request.user
try:
found_post = models.Post.objects.get(id=post_id)
except models.Post.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = serializers.CommentSerializer(data=request.data)
if serializer.is_valid():
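            # Top-level comments start a new thread: groupNumber is the 1-based count of
            # root comments on this post; replies created in Recomments below reuse it
            # together with an incrementing groupOrder.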
count = models.PostComment.objects.filter(post__id=post_id, parent=0).count()
serializer.save(creator=user, post=found_post, groupNumber=count + 1)
notification_views.create_notification(
user, found_post.creator, 'comment', post=found_post, comment=serializer.data['message'])
return Response(data=serializer.data, status=status.HTTP_201_CREATED)
else:
            return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class CommentDetail(APIView):
def find_own_comment(self, comment_id, user):
try:
comment = models.PostComment.objects.get(id=comment_id, creator=user)
return comment
except models.PostComment.DoesNotExist:
return None
def put(self, request, post_id, comment_id, format=None):
user = request.user
comment = self.find_own_comment(comment_id, user)
if comment is None:
return Response(status=status.HTTP_400_BAD_REQUEST)
serializer = serializers.CommentSerializer(
comment, data=request.data, partial=True
)
if serializer.is_valid():
serializer.save(creator=user)
return Response(data=serializer.data, status=status.HTTP_302_FOUND)
else:
return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, post_id, comment_id, format=None):
user = request.user
try:
comment_to_delete = models.PostComment.objects.get(
id=comment_id, post__id=post_id, creator=user)
if comment_to_delete.recomment_count == 0:
comment_to_delete.delete()
else:
                comment_to_delete.message = "This comment has been deleted."
comment_to_delete.save()
except models.PostComment.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
return Response(status=status.HTTP_204_NO_CONTENT)
class Recomments(APIView):
def get(self, request, post_id, comment_id, format=None):
try:
comments = models.PostComment.objects.filter(post__id=post_id, parent=comment_id)
serializer = serializers.CommentSerializer(comments, many=True)
return Response(data=serializer.data, status=status.HTTP_200_OK)
except models.Post.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
def post(self, request, post_id, comment_id, format=None):
user = request.user
try:
found_post = models.Post.objects.get(id=post_id)
found_comment = models.PostComment.objects.get(id=comment_id, post__id=post_id)
except models.Post.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = serializers.CommentSerializer(data=request.data)
if serializer.is_valid():
groupOrder = models.PostComment.objects.filter(post__id=post_id, parent=comment_id).count() + 1
serializer.save(creator=user, post=found_post, parent=comment_id,
groupNumber=found_comment.groupNumber, groupOrder=groupOrder)
notification_views.create_notification(user, found_post.creator,
'post_recomment', post=found_post, comment=serializer.data['message'])
return Response(data=serializer.data, status=status.HTTP_201_CREATED)
else:
            return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class ReCommentDetail(APIView):
def find_own_post(self, post_id, user):
try:
post = models.Post.objects.get(id=post_id, creator=user)
return post
except models.Post.DoesNotExist:
return None
def find_own_recomment(self, comment_id, recomment_id, user):
try:
recomment = models.PostComment.objects.get(id=recomment_id, parent=comment_id, creator=user)
return recomment
except models.PostComment.DoesNotExist:
return None
def get(self, request, post_id, comment_id, recomment_id, format=None):
user = request.user
try:
recomment = models.PostComment.objects.get(
id=recomment_id, post__id=post_id, parent=comment_id)
except models.PostComment.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = serializers.CommentSerializer(recomment)
return Response(data=serializer.data, status=status.HTTP_200_OK)
def put(self, request, post_id, comment_id, recomment_id, format=None):
user = request.user
recomment = self.find_own_recomment(comment_id, recomment_id, user)
if recomment is None:
return Response(status=status.HTTP_400_BAD_REQUEST)
serializer = serializers.CommentSerializer(
recomment, data=request.data, partial=True
)
if serializer.is_valid():
serializer.save(creator=user)
return Response(data=serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, post_id, comment_id, recomment_id, format=None):
user = request.user
try:
comment_to_delete = models.PostComment.objects.get(
id=recomment_id, post__id=post_id, parent=comment_id, creator=user)
comment_to_delete.delete()
except models.PostComment.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
return Response(status=status.HTTP_204_NO_CONTENT)
|
{"/wecode/views.py": ["/wecode/users/serializers.py"], "/wecode/lectures/admin.py": ["/wecode/lectures/models.py"], "/wecode/users/serializers.py": ["/wecode/posts/models.py"], "/wecode/banners/admin.py": ["/wecode/banners/models.py"]}
|
23,759,112
|
fullyalive/project_wecode
|
refs/heads/test
|
/wecode/notifications/models.py
|
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from wecode.users import models as user_models
from wecode.lectures import models as lecture_models
from wecode.studygroups import models as study_models
from wecode.posts import models as post_models
class Notification(lecture_models.TimeStampedModel):
TYPE_CHOICES = (
('like', 'Like'),
('comment', 'Comment'),
('follow', 'Follow'),
)
creator = models.ForeignKey(user_models.User, related_name='creator', on_delete=models.CASCADE)
to = models.ForeignKey(user_models.User, related_name='to', on_delete=models.CASCADE)
notification_type = models.CharField(max_length=20, choices=TYPE_CHOICES)
lecture = models.ForeignKey(lecture_models.Lecture, on_delete=models.CASCADE, null=True, blank=True)
study = models.ForeignKey(study_models.StudyGroup, on_delete=models.CASCADE, null=True, blank=True)
post = models.ForeignKey(post_models.Post, on_delete=models.CASCADE, null=True, blank=True)
comment = models.TextField(null=True, blank=True)
|
{"/wecode/views.py": ["/wecode/users/serializers.py"], "/wecode/lectures/admin.py": ["/wecode/lectures/models.py"], "/wecode/users/serializers.py": ["/wecode/posts/models.py"], "/wecode/banners/admin.py": ["/wecode/banners/models.py"]}
|
23,759,113
|
fullyalive/project_wecode
|
refs/heads/test
|
/wecode/notifications/migrations/0002_auto_20181026_1847.py
|
# Generated by Django 2.0.6 on 2018-10-26 09:47
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('posts', '0001_initial'),
('studygroups', '0001_initial'),
('notifications', '0001_initial'),
('lectures', '0002_auto_20181026_1847'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.AddField(
model_name='notification',
name='creator',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='creator', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='notification',
name='lecture',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='lectures.Lecture'),
),
migrations.AddField(
model_name='notification',
name='post',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='posts.Post'),
),
migrations.AddField(
model_name='notification',
name='study',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='studygroups.StudyGroup'),
),
migrations.AddField(
model_name='notification',
name='to',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='to', to=settings.AUTH_USER_MODEL),
),
]
|
{"/wecode/views.py": ["/wecode/users/serializers.py"], "/wecode/lectures/admin.py": ["/wecode/lectures/models.py"], "/wecode/users/serializers.py": ["/wecode/posts/models.py"], "/wecode/banners/admin.py": ["/wecode/banners/models.py"]}
|
23,759,114
|
fullyalive/project_wecode
|
refs/heads/test
|
/wecode/posts/models.py
|
from django.db import models
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from wecode.users import models as user_models
from django.contrib.humanize.templatetags.humanize import naturaltime
from time import strftime
import datetime
@python_2_unicode_compatible
class TimeStampedModel(models.Model):
created_at = models.DateTimeField(auto_now_add=True) # first created
updated_at = models.DateTimeField(auto_now=True) # last-modified
class Meta:
abstract = True
@python_2_unicode_compatible
class Post(TimeStampedModel):
""" Post Model """
TYPE_CHOICES = (
('qna', 'Q&A'),
        ('free', 'Free Board'),
        ('ask', 'Inquiries')
)
title = models.CharField(max_length=200)
post_type = models.CharField(max_length=80, choices=TYPE_CHOICES, null=True)
creator = models.ForeignKey(
user_models.User, null=True, related_name='posts', on_delete=models.CASCADE
)
description = models.TextField(null=True)
view_count = models.IntegerField(default=0)
isImportant = models.NullBooleanField(default=False)
@property
def created_time_mdhm(self):
return self.created_at.strftime("%m/%d %H:%M")
@property
def created_time_ymdhm(self):
return self.created_at.strftime("%y/%m/%d %H:%M")
@property
def created_time_ymd(self):
return self.created_at.strftime("%y/%m/%d")
@property
def natural_time(self):
return naturaltime(self.created_at)
@property
def like_count(self):
return self.post_likes.all().count()
@property
def comment_count(self):
return self.post_comments.all().count()
def __str__(self):
return '{} - {}'.format(self.title, self.creator)
class Meta:
ordering = ['-isImportant','-created_at']
@python_2_unicode_compatible
class PostComment(TimeStampedModel):
""" PostComment Model """
message = models.TextField()
creator = models.ForeignKey(user_models.User, null=True, on_delete=models.CASCADE)
post = models.ForeignKey(Post, null=True, related_name='post_comments', on_delete=models.CASCADE)
parent = models.IntegerField(default=0, null=True)
groupNumber = models.IntegerField(default=0, null=True)
groupOrder = models.IntegerField(default=0, null=True)
class Meta:
ordering = ['groupNumber', 'groupOrder']
def __str__(self):
return self.message
@property
def created_time_mdhm(self):
return self.created_at.strftime("%m/%d %H:%M")
@property
def recomment_count(self):
        # count replies that reference this comment as their parent
        return PostComment.objects.filter(parent=self.id).count()
@python_2_unicode_compatible
class PostLike(TimeStampedModel):
""" Like Model """
creator = models.ForeignKey(user_models.User, null=True, on_delete=models.CASCADE)
post = models.ForeignKey(Post, null=True, related_name='post_likes', on_delete=models.CASCADE)
def __str__(self):
return 'User: {} - Post Caption: {}'.format(self.creator.username, self.post.title)
|
{"/wecode/views.py": ["/wecode/users/serializers.py"], "/wecode/lectures/admin.py": ["/wecode/lectures/models.py"], "/wecode/users/serializers.py": ["/wecode/posts/models.py"], "/wecode/banners/admin.py": ["/wecode/banners/models.py"]}
|
23,759,115
|
fullyalive/project_wecode
|
refs/heads/test
|
/wecode/users/serializers.py
|
from django.contrib.auth import get_user_model # If used custom user model
from rest_framework import serializers
from rest_auth.registration.serializers import RegisterSerializer
from allauth.account.adapter import get_adapter
from allauth.account.utils import setup_user_email
from . import models
from wecode.lectures import serializers as lectures_serializers
from wecode.studygroups import serializers as studygroups_serializers
from wecode.posts.models import Post
class PasswordResetSerializer(serializers.ModelSerializer):
class Meta:
model = models.User
fields = (
'email'
)
class FeedUserSerializer(serializers.ModelSerializer):
class Meta:
model = models.User
fields = ('id','username')
class UserSerializer(serializers.ModelSerializer):
lectures = lectures_serializers.UserUseLectureSerializer(many=True, read_only=True)
attend_lectures = lectures_serializers.UserUseLectureSerializer(many=True, read_only=True)
wish_lectures = lectures_serializers.UserUseLectureSerializer(many=True, read_only=True)
studygroups = studygroups_serializers.UserUseStudySerializer(many=True, read_only=True)
attend_studygroups = studygroups_serializers.UserUseStudySerializer(many=True, read_only=True)
wish_studygroups = studygroups_serializers.UserUseStudySerializer(many=True, read_only=True)
class Meta:
model = models.User
fields = ('id', 'username', 'email', 'name', 'lectures','studygroups',
'date_joined', 'profile_image', 'bio', 'website', 'phone', 'gender',
'attend_lectures', 'wish_lectures',
'attend_studygroups', 'wish_studygroups'
)
class BasicUserSerializer(serializers.ModelSerializer):
class Meta:
model = models.User
fields = ('id', 'username', 'password', 'email')
write_only_fields = ('password',)
read_only_fields = ('id',)
def create(self, validated_data):
user = models.User.objects.create(
username=validated_data['username'],
email=validated_data['email'],
)
user.set_password(validated_data['password'])
user.save()
return user
class UserProfileSerializer(serializers.ModelSerializer):
# images = images_serializers.CountImageSerializer(many=True, read_only=True)
post_count = serializers.ReadOnlyField()
answer_count = serializers.SerializerMethodField()
followers_count = serializers.ReadOnlyField()
following_count = serializers.ReadOnlyField()
class Meta:
model = models.User
fields = (
'profile_image',
'username',
'name',
'bio',
'website',
'post_count',
'followers_count',
'following_count',
'answer_count'
)
def get_answer_count(self, obj):
post_qs = Post.objects.filter(creator__username=obj.username)
return post_qs.count()
class ListUserSerializer(serializers.ModelSerializer):
class Meta:
model = models.User
fields = (
'id',
'profile_image',
'username',
'name'
)
class SignUpSerializer(RegisterSerializer):
name = serializers.CharField(required=True, write_only=True)
def get_cleaned_data(self):
return {
'name': self.validated_data.get('name', ''),
'username': self.validated_data.get('username', ''),
'password': self.validated_data.get('password', ''),
'email': self.validated_data.get('email', '')
}
def save(self, request):
adapter = get_adapter()
user = adapter.new_user(request)
self.cleaned_data = self.get_cleaned_data()
adapter.save_user(request, user, self)
setup_user_email(request, user, [])
user.save()
return user
|
{"/wecode/views.py": ["/wecode/users/serializers.py"], "/wecode/lectures/admin.py": ["/wecode/lectures/models.py"], "/wecode/users/serializers.py": ["/wecode/posts/models.py"], "/wecode/banners/admin.py": ["/wecode/banners/models.py"]}
|
23,759,116
|
fullyalive/project_wecode
|
refs/heads/test
|
/wecode/studygroups/migrations/0001_initial.py
|
# Generated by Django 2.0.6 on 2018-10-26 09:47
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='StudyComment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('message', models.TextField()),
('parent', models.IntegerField(default=0, null=True)),
('groupNumber', models.IntegerField(default=0, null=True)),
('groupOrder', models.IntegerField(default=0, null=True)),
],
options={
'ordering': ['groupNumber', 'groupOrder'],
},
),
migrations.CreateModel(
name='StudyGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('studyImage', models.ImageField(null=True, upload_to='')),
('title', models.CharField(max_length=200)),
('location', models.CharField(blank=True, max_length=200)),
('short_description', models.TextField(blank=True)),
('description', models.TextField(blank=True)),
('attendants', models.PositiveIntegerField(default=0)),
('price', models.IntegerField(null=True)),
('deadline', models.DateField(null=True)),
('startDate', models.DateField(null=True)),
('endDate', models.DateField(null=True)),
('startTime', models.TimeField(null=True)),
('endTime', models.TimeField(null=True)),
('day1', models.CharField(blank=True, max_length=200, null=True)),
('day2', models.CharField(blank=True, max_length=200, null=True)),
('career1', models.TextField(blank=True)),
('career2', models.TextField(blank=True)),
('contents', models.TextField(blank=True)),
('curriculum1', models.TextField(blank=True)),
('curriculum2', models.TextField(blank=True)),
('url', models.CharField(blank=True, max_length=200, null=True)),
],
options={
'ordering': ['-created_at'],
},
),
migrations.CreateModel(
name='StudyImages',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to='photo/%Y/%m')),
('upload_date', models.DateTimeField(auto_now_add=True, verbose_name='Upload Date')),
],
),
migrations.CreateModel(
name='StudyLike',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
'abstract': False,
},
),
]
|
{"/wecode/views.py": ["/wecode/users/serializers.py"], "/wecode/lectures/admin.py": ["/wecode/lectures/models.py"], "/wecode/users/serializers.py": ["/wecode/posts/models.py"], "/wecode/banners/admin.py": ["/wecode/banners/models.py"]}
|
23,759,117
|
fullyalive/project_wecode
|
refs/heads/test
|
/wecode/banners/views.py
|
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework.renderers import StaticHTMLRenderer
from rest_framework.decorators import renderer_classes
from . import models, serializers
from wecode.users import serializers as user_serializers
from wecode.users import models as user_models
from wecode.notifications import views as notification_views
class banner_list_view(APIView):
@renderer_classes((StaticHTMLRenderer,))
def get(self, request, format=None):
banners = models.Banner.objects.all()
serializer = serializers.BannerSerializer(banners, many=True, context={'request': request})
return Response(data=serializer.data, status=status.HTTP_200_OK)
def post(self, request, format=None):
user = request.user
serializer = serializers.BannerSerializer(data=request.data)
if serializer.is_valid():
serializer.save(creator=user)
return Response(data=serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class banner_detail(APIView):
def find_own_banner(self, banner_id, user):
try:
banner = models.Banner.objects.get(id=banner_id, creator=user)
return banner
except models.Banner.DoesNotExist:
return None
@renderer_classes((StaticHTMLRenderer,))
def get(self, request, banner_id, format=None):
user = request.user
banner = self.find_own_banner(banner_id, user)
try:
banner = models.Banner.objects.get(id=banner_id)
except models.Banner.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
# if banner is None:
# return Response(status=status.HTTP_400_BAD_REQUEST)
serializer = serializers.BannerSerializer(banner, context={'request': request})
return Response(data=serializer.data, status=status.HTTP_200_OK)
def put(self, request, banner_id, format=None):
user = request.user
banner = self.find_own_banner(banner_id, user)
if banner is None:
return Response(status=status.HTTP_400_BAD_REQUEST)
serializer = serializers.BannerSerializer(
banner, data=request.data, partial=True
)
if serializer.is_valid():
serializer.save(creator=user)
return Response(data=serializer.data, status=status.HTTP_200_OK)
else:
return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, banner_id, format=None):
user = request.user
banner = self.find_own_banner(banner_id, user)
if banner is None:
return Response(status=status.HTTP_400_BAD_REQUEST)
banner.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
|
{"/wecode/views.py": ["/wecode/users/serializers.py"], "/wecode/lectures/admin.py": ["/wecode/lectures/models.py"], "/wecode/users/serializers.py": ["/wecode/posts/models.py"], "/wecode/banners/admin.py": ["/wecode/banners/models.py"]}
|
23,759,118
|
fullyalive/project_wecode
|
refs/heads/test
|
/wecode/studygroups/views.py
|
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status, generics
from rest_framework.filters import SearchFilter
from rest_framework.pagination import PageNumberPagination
from . import models, serializers
from wecode.users import serializers as user_serializers
from wecode.users import models as user_models
from wecode.notifications import views as notification_views
from django.shortcuts import get_object_or_404
class study_list_view(generics.ListCreateAPIView):
queryset = models.StudyGroup.objects.prefetch_related('study_likes')
queryset = queryset.prefetch_related('study_likes__creator')
queryset = queryset.select_related('creator')
serializer_class = serializers.StudySerializer
filter_backends = [SearchFilter]
search_fields = ['title', 'id']
pagination_class = PageNumberPagination
def get_serializer_class(self):
if self.request.method == 'POST':
return serializers.StudySerializer
return serializers.StudySerializer
def get_serializer_context(self):
return {'request': self.request}
def perform_create(self, serializer):
serializer.save(creator=self.request.user)
class study_detail(APIView):
def find_own_study(self, study_id, user):
try:
study = models.StudyGroup.objects.get(id=study_id, creator=user)
return study
except models.StudyGroup.DoesNotExist:
return None
def get(self, request, study_id, format=None):
try:
study = models.StudyGroup.objects.prefetch_related(
'study_likes', 'study_likes__creator',
'study_comments', 'study_comments__creator',
'wish_users', 'attend_users'
).select_related('creator').get(id=study_id)
except models.StudyGroup.DoesNotExist:
# .get() raises DoesNotExist rather than returning None, so handle it explicitly.
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = serializers.StudyDetailSerializer(study, context={'request': request})
return Response(data=serializer.data, status=status.HTTP_200_OK)
def put(self, request, study_id, format=None):
user = request.user
study = self.find_own_study(study_id, user)
if study is None:
return Response(status=status.HTTP_400_BAD_REQUEST)
serializer = serializers.StudySerializer(
study, data=request.data, partial=True
)
if serializer.is_valid():
serializer.save(creator=user)
return Response(data=serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, study_id, format=None):
user = request.user
study = self.find_own_study(study_id, user)
if study is None:
return Response(status=status.HTTP_400_BAD_REQUEST)
study.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class Likes(APIView):
def get(self, request, study_id, format=None):
study_likes = models.StudyLike.objects.filter(study__id=study_id)
like_creator_ids = study_likes.values('creator_id')
users = user_models.User.objects.filter(id__in=like_creator_ids)
serializer = user_serializers.FeedUserSerializer(users, many=True, context={'request': request})
return Response(data=serializer.data, status=status.HTTP_200_OK)
def post(self, request, study_id, format=None):
user = request.user
try:
found_study = models.StudyGroup.objects.get(id=study_id)
except models.StudyGroup.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
try:
preexisting_like = models.StudyLike.objects.get(
creator=user,
study=found_study
)
return Response(status=status.HTTP_304_NOT_MODIFIED)
except models.StudyLike.DoesNotExist:
new_like = models.StudyLike.objects.create(
creator=user,
study=found_study
)
new_like.save()
# notification_views.create_notification(user, found_image.creator, 'like', found_image)
return Response(status=status.HTTP_201_CREATED)
class Unlikes(APIView):
def delete(self, request, study_id, format=None):
user = request.user
try:
preexisting_like = models.StudyLike.objects.get(
creator=user,
study__id=study_id
)
preexisting_like.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
except models.StudyLike.DoesNotExist:
return Response(status=status.HTTP_304_NOT_MODIFIED)
class Comments(APIView):
def get(self, request, study_id, format=None):
try:
comments = models.StudyComment.objects.select_related('creator','study').filter(study__id=study_id)
serializer = serializers.CommentSerializer(comments, many=True)
return Response(data=serializer.data, status=status.HTTP_200_OK)
except models.StudyGroup.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
def post(self, request, study_id, format=None):
user = request.user
try:
found_study = models.StudyGroup.objects.get(id=study_id)
except models.StudyGroup.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = serializers.CommentSerializer(data=request.data)
if serializer.is_valid():
count = models.StudyComment.objects.filter(study__id=study_id, parent=0).count()
serializer.save(creator=user, study=found_study, groupNumber=count + 1)
notification_views.create_notification(
user, found_study.creator, 'comment', study=found_study, comment=serializer.data['message'])
return Response(data=serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class CommentDetail(APIView):
def find_own_comment(self, comment_id, user):
try:
comment = models.StudyComment.objects.get(id=comment_id, creator=user)
return comment
except models.StudyComment.DoesNotExist:
return None
def get(self, request, study_id, comment_id, format=None):
user = request.user
try:
comment = models.StudyComment.objects.select_related(
'study', 'creator').get(id=comment_id, study__id=study_id)
except models.StudyComment.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = serializers.CommentSerializer(comment)
return Response(data=serializer.data, status=status.HTTP_200_OK)
def put(self, request, study_id, comment_id, format=None):
user = request.user
comment = self.find_own_comment(comment_id, user)
if comment is None:
return Response(status=status.HTTP_400_BAD_REQUEST)
serializer = serializers.CommentSerializer(
comment, data=request.data, partial=True
)
if serializer.is_valid():
serializer.save(creator=user)
return Response(data=serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, study_id, comment_id, format=None):
user = request.user
try:
comment_to_delete = models.StudyComment.objects.get(
id=comment_id, study__id=study_id, creator=user)
if comment_to_delete.recomment_count == 0:
comment_to_delete.delete()
else:
comment_to_delete.message = "삭제된 댓글입니다."  # i.e. "This comment has been deleted."
comment_to_delete.save()
except models.StudyComment.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
return Response(status=status.HTTP_204_NO_CONTENT)
class Search(APIView):
def get(self, request, format=None):
queryset = models.StudyGroup.objects.prefetch_related(
'study_comments', 'study_likes').select_related('creator').all()
title = request.query_params.get('title', None)
creator = request.query_params.get('creator', None)
if title is None and creator is None:
return Response(status=status.HTTP_400_BAD_REQUEST)
studyGroup1 = title is not None and queryset.filter(
title__istartswith=title) or queryset.none()
studyGroup2 = creator is not None and queryset.filter(
creator__username__istartswith=creator) or queryset.none()
mergeStudyGroups = studyGroup1 | studyGroup2
serializer = serializers.StudySerializer(
mergeStudyGroups, many=True, context={"request": request}
)
return Response(data=serializer.data, status=status.HTTP_200_OK)
class Wish_Study(APIView):
def find_own_study(self, study_id):
try:
study = models.StudyGroup.objects.get(id=study_id)
return study
except models.StudyGroup.DoesNotExist:
return None
def post(self, request, study_id, format=None):
user = request.user
study = self.find_own_study(study_id)
if study is None:
return Response(status=status.HTTP_400_BAD_REQUEST)
study.wish_users.add(user)
study.save()
return Response(status=status.HTTP_302_FOUND)
def delete(self, request, study_id, format=None):
user = request.user
study = self.find_own_study(study_id)
if study is None:
return Response(status=status.HTTP_400_BAD_REQUEST)
study.wish_users.remove(user)
study.save()
return Response(status=status.HTTP_204_NO_CONTENT)
class Attend_Study(APIView):
def find_own_study(self, study_id):
try:
study = models.StudyGroup.objects.get(id=study_id)
return study
except models.StudyGroup.DoesNotExist:
return None
def post(self, request, study_id, format=None):
user = request.user
study = self.find_own_study(study_id)
if study is None:
return Response(status=status.HTTP_400_BAD_REQUEST)
study.attend_users.add(user)
study.save()
return Response(status=status.HTTP_302_FOUND)
def delete(self, request, study_id, format=None):
user = request.user
study = self.find_own_study(study_id)
if study is None:
return Response(status=status.HTTP_400_BAD_REQUEST)
study.attend_users.remove(user)
study.save()
return Response(status=status.HTTP_204_NO_CONTENT)
class Recomments(APIView):
def get(self, request, study_id, comment_id, format=None):
try:
comments = models.StudyComment.objects.select_related('creator','study').filter(study__id=study_id, parent=comment_id)
serializer = serializers.CommentSerializer(comments, many=True)
return Response(data=serializer.data, status=status.HTTP_200_OK)
except models.StudyGroup.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
def post(self, request, study_id, comment_id, format=None):
user = request.user
try:
found_study = models.StudyGroup.objects.get(id=study_id)
found_comment = models.StudyComment.objects.get(id=comment_id, study__id=study_id)
except models.StudyGroup.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = serializers.CommentSerializer(data=request.data)
if serializer.is_valid():
groupOrder = models.StudyComment.objects.filter(study__id=study_id, parent=comment_id).count() + 1
serializer.save(creator=user, study=found_study, parent=comment_id,
groupNumber=found_comment.groupNumber, groupOrder=groupOrder)
notification_views.create_notification(user, found_study.creator,
'study_recomment', study=found_study, comment=serializer.data['message'])
return Response(data=serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class ReCommentDetail(APIView):
def find_own_study(self, study_id, user):
try:
study = models.StudyGroup.objects.get(id=study_id, creator=user)
return study
except models.StudyGroup.DoesNotExist:
return None
def find_own_recomment(self, comment_id, recomment_id, user):
try:
recomment = models.StudyComment.objects.get(id=recomment_id, parent=comment_id, creator=user)
return recomment
except models.StudyComment.DoesNotExist:
return None
def get(self, request, study_id, comment_id, recomment_id, format=None):
user = request.user
try:
recomment = models.StudyComment.objects.select_related(
'study', 'creator').get(
id=recomment_id, study__id=study_id, parent=comment_id)
except models.StudyComment.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = serializers.CommentSerializer(recomment)
return Response(data=serializer.data, status=status.HTTP_200_OK)
def put(self, request, study_id, comment_id, recomment_id, format=None):
user = request.user
recomment = self.find_own_recomment(comment_id, recomment_id, user)
if recomment is None:
return Response(status=status.HTTP_400_BAD_REQUEST)
serializer = serializers.CommentSerializer(
recomment, data=request.data, partial=True
)
if serializer.is_valid():
serializer.save(creator=user)
return Response(data=serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, study_id, comment_id, recomment_id, format=None):
user = request.user
try:
comment_to_delete = models.StudyComment.objects.get(
id=recomment_id, study__id=study_id, parent=comment_id, creator=user)
comment_to_delete.delete()
except models.StudyComment.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
return Response(status=status.HTTP_204_NO_CONTENT)
|
{"/wecode/views.py": ["/wecode/users/serializers.py"], "/wecode/lectures/admin.py": ["/wecode/lectures/models.py"], "/wecode/users/serializers.py": ["/wecode/posts/models.py"], "/wecode/banners/admin.py": ["/wecode/banners/models.py"]}
|
23,759,119
|
fullyalive/project_wecode
|
refs/heads/test
|
/wecode/notifications/apps.py
|
from django.apps import AppConfig
class NotificationsAppConfig(AppConfig):
name = 'wecode.notifications'
|
{"/wecode/views.py": ["/wecode/users/serializers.py"], "/wecode/lectures/admin.py": ["/wecode/lectures/models.py"], "/wecode/users/serializers.py": ["/wecode/posts/models.py"], "/wecode/banners/admin.py": ["/wecode/banners/models.py"]}
|
23,759,120
|
fullyalive/project_wecode
|
refs/heads/test
|
/wecode/users/views.py
|
from rest_framework.views import APIView
from rest_framework import permissions
from rest_framework.response import Response
from rest_framework import status, generics
from rest_framework.generics import CreateAPIView, UpdateAPIView
from django.contrib.auth import get_user_model  # works with a custom user model as well
from . import models, serializers
from allauth.socialaccount.providers.facebook.views import FacebookOAuth2Adapter
from rest_auth.registration.views import SocialLoginView
from rest_framework.renderers import TemplateHTMLRenderer
class CreateUserView(CreateAPIView):
model = get_user_model()
permission_classes = [
permissions.AllowAny # Or anon users can't register
]
serializer_class = serializers.BasicUserSerializer
class ChangePassword(APIView):
def put(self, request, username, format=None):
user = request.user
if user.username == username:
current_password = request.data.get('current_password', None)
if current_password is not None:
passwords_match = user.check_password(current_password)
if passwords_match:
new_password = request.data.get('new_password', None)
if new_password is not None:
user.set_password(new_password)
user.save()
return Response(status=status.HTTP_200_OK)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
class UpdateUserView(APIView):
def post(self, request, format=None):
user = request.user
user_serializer = serializers.UserSerializer(user)
serializer = serializers.UserSerializer(user, data=request.data, partial=True)
if serializer.is_valid():
serializer.save()
return Response(data=serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class FacebookLogin(SocialLoginView):
adapter_class = FacebookOAuth2Adapter
class ProfileView(APIView):
def get(self, request, format=None):
# Build one queryset and chain the prefetches; reassigning from User.objects each time discards the previous ones.
user = models.User.objects.prefetch_related(
'lectures', 'lectures__creator',
'studygroups', 'studygroups__creator',
'attend_lectures', 'attend_lectures__creator',
'attend_studygroups', 'attend_studygroups__creator',
'wish_lectures', 'wish_lectures__creator',
'wish_studygroups', 'wish_studygroups__creator',
)
user = user.get(id=request.user.id)
# user = user.get(pk=pk)
serializer = serializers.UserProfileSerializer(user)
return Response(data=serializer.data, status=status.HTTP_200_OK)
def post(self, request, format=None):
user = request.user
user_serializer = serializers.UserSerializer(user)
serializer = serializers.UserSerializer(user, data=request.data, partial=True)
if serializer.is_valid():
serializer.save()
return Response(data=serializer.data, status=status.HTTP_200_OK)
else:
return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
|
{"/wecode/views.py": ["/wecode/users/serializers.py"], "/wecode/lectures/admin.py": ["/wecode/lectures/models.py"], "/wecode/users/serializers.py": ["/wecode/posts/models.py"], "/wecode/banners/admin.py": ["/wecode/banners/models.py"]}
|
23,759,121
|
fullyalive/project_wecode
|
refs/heads/test
|
/wecode/studygroups/urls.py
|
from django.urls import path
from . import views
app_name = "studygroups"
urlpatterns = [
path("", view=views.study_list_view.as_view(), name="list"),
path("search/", view=views.Search.as_view(), name="search"),
path("<study_id>/wish/", view=views.study_detail.as_view(), name="wish_study"),
path("<study_id>/attend/", view=views.study_detail.as_view(), name="attend_study"),
path("<study_id>/", view=views.study_detail.as_view(), name="detail"),
path("<study_id>/likes/", view=views.Likes.as_view(), name="like_study"),
path("<study_id>/unlikes/", view=views.Unlikes.as_view(), name="unlike_study"),
path("<study_id>/comments/", view=views.Comments.as_view(), name="comments"),
path("<study_id>/comments/<comment_id>/", view=views.CommentDetail.as_view(), name="comment_detail"),
path("<study_id>/comments/<comment_id>/recomments/", view=views.Recomments.as_view(), name="recomments"),
path("<study_id>/comments/<comment_id>/recomments/<recomment_id>/",
view=views.ReCommentDetail.as_view(), name="recomment_detail"),
]
|
{"/wecode/views.py": ["/wecode/users/serializers.py"], "/wecode/lectures/admin.py": ["/wecode/lectures/models.py"], "/wecode/users/serializers.py": ["/wecode/posts/models.py"], "/wecode/banners/admin.py": ["/wecode/banners/models.py"]}
|
23,759,122
|
fullyalive/project_wecode
|
refs/heads/test
|
/wecode/lectures/views.py
|
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status, generics
from rest_framework.filters import SearchFilter
from rest_framework.pagination import PageNumberPagination
from . import models, serializers
from wecode.users import serializers as user_serializers
from wecode.users import models as user_models
from wecode.notifications import views as notification_views
from django.shortcuts import get_object_or_404
class lecture_list_view(generics.ListCreateAPIView):
# queryset = models.Lecture.objects.prefetch_related(
# 'lecture_comments', 'lecture_likes','lecture_likes__creator').select_related('creator').all()
serializer_class = serializers.LectureSerializer
filter_backends = [SearchFilter]
search_fields = ['title', 'id']
pagination_class = PageNumberPagination
def get_queryset(self):
queryset = models.Lecture.objects.all()
# Set up eager loading to avoid N+1 selects
queryset = self.get_serializer_class().setup_eager_loading(queryset)
return queryset
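# Note: setup_eager_loading above is assumed to be a classmethod defined on the
# serializer in serializers.py (not shown in this file). A minimal sketch of what
# it might look like, assuming the related names this view uses elsewhere
# ('creator', 'lecture_likes', 'lecture_comments'):
#
#     @classmethod
#     def setup_eager_loading(cls, queryset):
#         # one JOIN for the creator, one extra query per prefetched relation
#         return queryset.select_related('creator').prefetch_related(
#             'lecture_likes', 'lecture_likes__creator',
#             'lecture_comments', 'lecture_comments__creator')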
def get_serializer_class(self):
if self.request.method == 'POST':
return serializers.LectureDetailSerializer
return serializers.LectureSerializer
def get_serializer_context(self):
return {'request': self.request}
def perform_create(self, serializer):
serializer.save(creator=self.request.user)
class lecture_detail(APIView):
def find_own_lecture(self, lecture_id, user):
try:
lecture = models.Lecture.objects.get(id=lecture_id, creator=user)
return lecture
except models.Lecture.DoesNotExist:
return None
def get(self, request, lecture_id, format=None):
try:
lecture = models.Lecture.objects.prefetch_related(
'lecture_likes', 'lecture_likes__creator',
'lecture_comments', 'lecture_comments__creator',
'wish_users', 'attend_users'
).select_related('creator').get(id=lecture_id)
except models.Lecture.DoesNotExist:
# .get() raises DoesNotExist rather than returning None.
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = serializers.LectureDetailSerializer(lecture, context={'request': request})
return Response(data=serializer.data, status=status.HTTP_200_OK)
def put(self, request, lecture_id, format=None):
user = request.user
lecture = self.find_own_lecture(lecture_id, user)
if lecture is None:
return Response(status=status.HTTP_400_BAD_REQUEST)
serializer = serializers.LectureSerializer(
lecture, data=request.data, partial=True
)
if serializer.is_valid():
serializer.save(creator=user)
return Response(data=serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, lecture_id, format=None):
user = request.user
lecture = self.find_own_lecture(lecture_id, user)
if lecture is None:
return Response(status=status.HTTP_400_BAD_REQUEST)
lecture.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class Likes(APIView):
def get(self, request, lecture_id, format=None):
lecture_likes = models.LectureLike.objects.filter(lecture__id=lecture_id)
like_creator_ids = lecture_likes.values('creator_id')
users = user_models.User.objects.filter(id__in=like_creator_ids)
serializer = user_serializers.FeedUserSerializer(users, many=True, context={'request': request})
return Response(data=serializer.data, status=status.HTTP_200_OK)
def post(self, request, lecture_id, format=None):
user = request.user
try:
found_lecture = models.Lecture.objects.get(id=lecture_id)
except models.Lecture.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
try:
preexisting_like = models.LectureLike.objects.get(
creator=user,
lecture=found_lecture
)
return Response(status=status.HTTP_304_NOT_MODIFIED)
except models.LectureLike.DoesNotExist:
new_like = models.LectureLike.objects.create(
creator=user,
lecture=found_lecture
)
new_like.save()
# notification_views.create_notification(user, found_image.creator, 'like', found_image)
return Response(status=status.HTTP_201_CREATED)
class Unlikes(APIView):
def delete(self, request, lecture_id, format=None):
user = request.user
try:
preexisting_like = models.LectureLike.objects.get(
creator=user,
lecture__id=lecture_id
)
preexisting_like.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
except models.LectureLike.DoesNotExist:
return Response(status=status.HTTP_304_NOT_MODIFIED)
class Comments(APIView):
def get(self, request, lecture_id, format=None):
try:
comments = models.LectureComment.objects.filter(lecture__id=lecture_id)
serializer = serializers.CommentSerializer(comments, many=True)
return Response(data=serializer.data, status=status.HTTP_200_OK)
except models.Lecture.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
def post(self, request, lecture_id, format=None):
user = request.user
try:
found_lecture = models.Lecture.objects.get(id=lecture_id)
except models.Lecture.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = serializers.CommentSerializer(data=request.data)
if serializer.is_valid():
count = models.LectureComment.objects.filter(lecture__id=lecture_id, parent=0).count()
serializer.save(creator=user, lecture=found_lecture, groupNumber=count + 1)
notification_views.create_notification(
user, found_lecture.creator, 'comment', lecture=found_lecture, comment=serializer.data['message'])
return Response(data=serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class CommentDetail(APIView):
def find_own_comment(self, comment_id, user):
try:
comment = models.LectureComment.objects.get(id=comment_id, creator=user)
return comment
except models.LectureComment.DoesNotExist:
return None
def get(self, request, lecture_id, comment_id, format=None):
user = request.user
try:
comment = models.LectureComment.objects.get(id=comment_id, lecture__id=lecture_id)
except models.LectureComment.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = serializers.CommentSerializer(comment)
return Response(data=serializer.data, status=status.HTTP_200_OK)
def put(self, request, lecture_id, comment_id, format=None):
user = request.user
comment = self.find_own_comment(comment_id, user)
if comment is None:
return Response(status=status.HTTP_400_BAD_REQUEST)
serializer = serializers.CommentSerializer(
comment, data=request.data, partial=True
)
if serializer.is_valid():
serializer.save(creator=user)
return Response(data=serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, lecture_id, comment_id, format=None):
user = request.user
try:
comment_to_delete = models.LectureComment.objects.get(
id=comment_id, lecture__id=lecture_id, creator=user)
if comment_to_delete.recomment_count == 0:
comment_to_delete.delete()
else:
comment_to_delete.message = "삭제된 댓글입니다."  # i.e. "This comment has been deleted."
comment_to_delete.save()
except models.LectureComment.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
return Response(status=status.HTTP_204_NO_CONTENT)
class Search(APIView):
def get(self, request, format=None):
title = request.query_params.get('title', None)
creator = request.query_params.get('creator', None)
if title is None and creator is None:
return Response(status=status.HTTP_400_BAD_REQUEST)
lectures1 = title is not None and models.Lecture.objects.filter(
title__istartswith=title) or models.Lecture.objects.none()
lectures2 = creator is not None and models.Lecture.objects.filter(
creator__username__istartswith=creator) or models.Lecture.objects.none()
mergeLectures = lectures1 | lectures2
serializer = serializers.LectureSerializer(
mergeLectures, many=True, context={"request": request})
return Response(data=serializer.data, status=status.HTTP_200_OK)
class Wish_Lecture(APIView):
def find_own_lecture(self, lecture_id):
try:
lecture = models.Lecture.objects.get(id=lecture_id)
return lecture
except models.Lecture.DoesNotExist:
return None
def post(self, request, lecture_id, format=None):
user = request.user
lecture = self.find_own_lecture(lecture_id)
if lecture is None:
return Response(status=status.HTTP_400_BAD_REQUEST)
lecture.wish_users.add(user)
lecture.save()
return Response(status=status.HTTP_302_FOUND)
def delete(self, request, lecture_id, format=None):
user = request.user
lecture = self.find_own_lecture(lecture_id)
if lecture is None:
return Response(status=status.HTTP_400_BAD_REQUEST)
lecture.wish_users.remove(user)
lecture.save()
return Response(status=status.HTTP_204_NO_CONTENT)
class Attend_Lecture(APIView):
def find_own_lecture(self, lecture_id):
try:
lecture = models.Lecture.objects.get(id=lecture_id)
return lecture
except models.Lecture.DoesNotExist:
return None
def post(self, request, lecture_id, format=None):
user = request.user
lecture = self.find_own_lecture(lecture_id)
if lecture is None:
return Response(status=status.HTTP_400_BAD_REQUEST)
lecture.attend_users.add(user)
lecture.save()
return Response(status=status.HTTP_302_FOUND)
def delete(self, request, lecture_id, format=None):
user = request.user
lecture = self.find_own_lecture(lecture_id)
if lecture is None:
return Response(status=status.HTTP_400_BAD_REQUEST)
lecture.attend_users.remove(user)
lecture.save()
return Response(status=status.HTTP_204_NO_CONTENT)
class Recomments(APIView):
def get(self, request, lecture_id, comment_id, format=None):
try:
comments = models.LectureComment.objects.filter(lecture__id=lecture_id, parent=comment_id)
serializer = serializers.CommentSerializer(comments, many=True)
return Response(data=serializer.data, status=status.HTTP_200_OK)
except models.Lecture.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
def post(self, request, lecture_id, comment_id, format=None):
user = request.user
try:
found_lecture = models.Lecture.objects.get(id=lecture_id)
found_comment = models.LectureComment.objects.get(id=comment_id, lecture__id=lecture_id)
except models.Lecture.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = serializers.CommentSerializer(data=request.data)
if serializer.is_valid():
groupOrder = models.LectureComment.objects.filter(lecture__id=lecture_id, parent=comment_id).count() + 1
serializer.save(creator=user, lecture=found_lecture, parent=comment_id,
groupNumber=found_comment.groupNumber, groupOrder=groupOrder)
notification_views.create_notification(user, found_lecture.creator,
'lecture_recomment', lecture=found_lecture, comment=serializer.data['message'])
return Response(data=serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class ReCommentDetail(APIView):
def find_own_lecture(self, lecture_id, user):
try:
lecture = models.Lecture.objects.get(id=lecture_id, creator=user)
return lecture
except models.Lecture.DoesNotExist:
return None
def find_own_recomment(self, comment_id, recomment_id, user):
try:
recomment = models.LectureComment.objects.get(id=recomment_id, parent=comment_id, creator=user)
return recomment
except models.LectureComment.DoesNotExist:
return None
def get(self, request, lecture_id, comment_id, recomment_id, format=None):
user = request.user
try:
recomment = models.LectureComment.objects.get(
id=recomment_id, lecture__id=lecture_id, parent=comment_id)
except models.LectureComment.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = serializers.CommentSerializer(recomment)
return Response(data=serializer.data, status=status.HTTP_200_OK)
def put(self, request, lecture_id, comment_id, recomment_id, format=None):
user = request.user
recomment = self.find_own_recomment(comment_id, recomment_id, user)
if recomment is None:
return Response(status=status.HTTP_400_BAD_REQUEST)
serializer = serializers.CommentSerializer(
recomment, data=request.data, partial=True
)
if serializer.is_valid():
serializer.save(creator=user)
return Response(data=serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, lecture_id, comment_id, recomment_id, format=None):
user = request.user
try:
comment_to_delete = models.LectureComment.objects.get(
id=recomment_id, lecture__id=lecture_id, parent=comment_id, creator=user)
comment_to_delete.delete()
except models.LectureComment.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
return Response(status=status.HTTP_204_NO_CONTENT)
class TestView(APIView):
def get(self, request, format=None):
queryset = models.Lecture.objects.all()
serializer = serializers.TestSerializer(queryset, many=True)
return Response(data=serializer.data, status=status.HTTP_200_OK)
|
{"/wecode/views.py": ["/wecode/users/serializers.py"], "/wecode/lectures/admin.py": ["/wecode/lectures/models.py"], "/wecode/users/serializers.py": ["/wecode/posts/models.py"], "/wecode/banners/admin.py": ["/wecode/banners/models.py"]}
|
23,759,123
|
fullyalive/project_wecode
|
refs/heads/test
|
/wecode/views.py
|
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from django.views.generic import View
from django.http import HttpResponse
from django.conf import settings
from django.shortcuts import get_object_or_404
from django.core.mail import send_mail
from wecode.users.serializers import PasswordResetSerializer
from wecode.users.models import User
import os
import uuid
class ReactAppView(View):
def get(self, request):
try:
with open(os.path.join(str(settings.ROOT_DIR), 'frontend', 'build', 'index.html')) as file:
return HttpResponse(file.read())
except FileNotFoundError:
return HttpResponse(
"""
index.html not found ! build your React app !!
""",
status=501,
)
class passwordReset(APIView):
# def email_client(request):
# id = request.POST.get('id')
# client = Client.objects.get(id=id)
# msg_html = render_to_string('templates/email.html', {'client': client})
# template_email_text = ''
# return send_mail('Lelander work samples', template_email_text, 'test@emailsender.com', ['test@emailrecipient.com'], html_message=msg_html, fail_silently=False)
def post(self, request, *args, **kwargs):
serializer = PasswordResetSerializer(data=request.data)
email = request.data['email']
user = get_object_or_404(User, email=email)
username = user.username
temp = uuid.uuid4().hex[:10]
subject = '[WECODE] 임시 비밀번호 발급입니다.'  # "[WECODE] A temporary password has been issued."
message = '임시비밀 번호는 {} 입니다. \n {}로 로그인 한 뒤 비밀번호를 수정해주세요\n\n 아이디는 {} 입니다.'.format(temp, temp, username)
# English: "Your temporary password is {temp}.\n Log in with {temp} and then change your password.\n\n Your username is {username}."
email_from = settings.EMAIL_HOST_USER
recipient_list = [email, ]
print(subject)
print(message)
print(email_from)
print(recipient_list)
user.set_password(temp)
user.save()
send_mail(subject,message, email_from, recipient_list)
return Response(status=status.HTTP_200_OK)
|
{"/wecode/views.py": ["/wecode/users/serializers.py"], "/wecode/lectures/admin.py": ["/wecode/lectures/models.py"], "/wecode/users/serializers.py": ["/wecode/posts/models.py"], "/wecode/banners/admin.py": ["/wecode/banners/models.py"]}
|
23,759,124
|
fullyalive/project_wecode
|
refs/heads/test
|
/wecode/studygroups/apps.py
|
from django.apps import AppConfig
class StudygroupsAppConfig(AppConfig):
name = 'wecode.studygroups'
|
{"/wecode/views.py": ["/wecode/users/serializers.py"], "/wecode/lectures/admin.py": ["/wecode/lectures/models.py"], "/wecode/users/serializers.py": ["/wecode/posts/models.py"], "/wecode/banners/admin.py": ["/wecode/banners/models.py"]}
|
23,759,125
|
fullyalive/project_wecode
|
refs/heads/test
|
/wecode/banners/admin.py
|
from django.contrib import admin
from .models import Banner, Images
from django_summernote.admin import SummernoteModelAdmin
class PhotoInline(admin.StackedInline):
model = Images
extra = 2
@admin.register(Banner)
class BannerAdmin(SummernoteModelAdmin):
inlines = [PhotoInline]
list_display = ['title', 'creator', 'location', 'short_description']
@admin.register(Images)
class PhotoAdmin(admin.ModelAdmin):
list_display = ('id', 'upload_date')
|
{"/wecode/views.py": ["/wecode/users/serializers.py"], "/wecode/lectures/admin.py": ["/wecode/lectures/models.py"], "/wecode/users/serializers.py": ["/wecode/posts/models.py"], "/wecode/banners/admin.py": ["/wecode/banners/models.py"]}
|
23,759,126
|
fullyalive/project_wecode
|
refs/heads/test
|
/wecode/posts/serializers.py
|
from rest_framework import serializers
from . import models
from wecode.users import models as user_models
class FeedUserSerializer(serializers.ModelSerializer):
class Meta:
model = user_models.User
fields = (
'id',
'username',
'profile_image',
)
class CommentSerializer(serializers.ModelSerializer):
creator = FeedUserSerializer(read_only=True)
class Meta:
model = models.PostComment
fields = (
'id',
'message',
'creator',
'created_time_mdhm',
'parent',
'groupNumber',
'groupOrder',
'recomment_count',
)
class PostAnswerTitleSerializer(serializers.ModelSerializer):
class Meta:
model = models.Post
fields = (
'id',
'title'
)
class PostSerializer(serializers.ModelSerializer):
like_count = serializers.ReadOnlyField()
post_comments = CommentSerializer(read_only=True, many=True)
comment_count = serializers.ReadOnlyField()
creator = FeedUserSerializer(read_only=True)
class Meta:
model = models.Post
fields = (
"id",
"created_time_mdhm",
"created_time_ymdhm",
"created_time_ymd",
"updated_at",
"title",
"post_type",
# "description",
"creator",
"view_count",
"like_count",
"comment_count",
"post_comments",
"isImportant",
)
class PostDetailSerializer(serializers.ModelSerializer):
like_count = serializers.ReadOnlyField()
post_comments = CommentSerializer(read_only=True, many=True)
comment_count = serializers.ReadOnlyField()
creator = FeedUserSerializer(read_only=True)
is_liked = serializers.SerializerMethodField()
class Meta:
model = models.Post
fields = (
"id",
"created_time_mdhm",
"updated_at",
"title",
"post_type",
"description",
"creator",
"view_count",
"like_count",
"comment_count",
"post_comments",
'is_liked'
)
def get_is_liked(self, obj):
if 'request' in self.context:
request = self.context['request']
queryset = obj.post_likes.all()
for data in queryset:
if data.creator.id == request.user.id:
return True
return False
return False
|
{"/wecode/views.py": ["/wecode/users/serializers.py"], "/wecode/lectures/admin.py": ["/wecode/lectures/models.py"], "/wecode/users/serializers.py": ["/wecode/posts/models.py"], "/wecode/banners/admin.py": ["/wecode/banners/models.py"]}
|
23,759,127
|
fullyalive/project_wecode
|
refs/heads/test
|
/wecode/lectures/migrations/0002_auto_20181026_1847.py
|
# Generated by Django 2.0.6 on 2018-10-26 09:47
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('lectures', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='lecturelike',
name='creator',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='lecturelike',
name='lecture',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='lecture_likes', to='lectures.Lecture'),
),
migrations.AddField(
model_name='lectureimages',
name='lecture',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='lectures.Lecture'),
),
migrations.AddField(
model_name='lecturecomment',
name='creator',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='lecturecomment',
name='lecture',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='lecture_comments', to='lectures.Lecture'),
),
migrations.AddField(
model_name='lecture',
name='attend_users',
field=models.ManyToManyField(blank=True, related_name='attend_lectures', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='lecture',
name='creator',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='lectures', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='lecture',
name='wish_users',
field=models.ManyToManyField(blank=True, related_name='wish_lectures', to=settings.AUTH_USER_MODEL),
),
]
|
{"/wecode/views.py": ["/wecode/users/serializers.py"], "/wecode/lectures/admin.py": ["/wecode/lectures/models.py"], "/wecode/users/serializers.py": ["/wecode/posts/models.py"], "/wecode/banners/admin.py": ["/wecode/banners/models.py"]}
|