seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8983073054 | from datetime import datetime
from typing import Optional, Union
class Poll:
    """
    Slot class for each Poll object.

    Immutable view over one poll message: ids, question, options and
    creation date are set once in ``__init__`` and exposed via read-only
    properties.
    """

    # Bounds on the number of answer options a poll may carry.
    MAX_OPTIONS = 10
    MIN_OPTIONS = 2

    __slots__ = [
        "_message_id",
        "_channel_id",
        "_question",
        "_options",
        "_date_created_at",
        "_user_id",
    ]

    def __init__(
        self,
        message_id: int,
        channel_id: int,
        question: str,
        options: list[str],
        user_id: Optional[int] = None,
        date_created: Optional[Union[datetime, str]] = None,
    ):
        """
        :param message_id: id of the message carrying the poll.
        :param channel_id: id of the channel the message lives in.
        :param question: the poll question text.
        :param options: answer option strings.
        :param user_id: id of the poll author, if known.
        :param date_created: creation date as a datetime or an ISO
            "YYYY-MM-DD" string; defaults to today's date.
        """
        self._message_id = message_id
        self._channel_id = channel_id
        self._question = question
        self._options = options
        # BUG FIX: the original default was datetime.now().strftime(...),
        # which is evaluated once at import time, so every poll created in a
        # long-running process was stamped with the process start date.
        # Resolve "now" at call time instead.
        if date_created is None:
            date_created = datetime.now().strftime("%Y-%m-%d")
        self._date_created_at = date_created
        self._user_id = user_id

    @property
    def message_id(self) -> int:
        return self._message_id

    @property
    def channel_id(self) -> int:
        return self._channel_id

    @property
    def question(self) -> str:
        return self._question

    @property
    def options(self) -> list[str]:
        return self._options

    @property
    def created_at(self) -> datetime.date:
        # Normalise the stored value (ISO string or datetime) to a date.
        if isinstance(self._date_created_at, str):
            return datetime.fromisoformat(self._date_created_at).date()
        if isinstance(self._date_created_at, datetime):
            return self._date_created_at.date()
        return self._date_created_at

    @property
    def user_id(self) -> Optional[int]:
        return self._user_id
| TheXer/Jachym | src/ui/poll.py | poll.py | py | 1,559 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "typing.Optional",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
... |
73224987304 | # views.py
from django.shortcuts import get_object_or_404
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from calculator.models import Report
from calculator.serializers import ReportSerializer, ReportCalculationSerializer
import pandas as pd
from uuid import uuid4
from django.core.files.uploadedfile import InMemoryUploadedFile
from drf_spectacular.utils import extend_schema
@extend_schema(responses=ReportSerializer)
@api_view(['GET'])
def all_reports(request):
    """Retrieve the names and uuids of all saved reports."""
    # Serialize every stored report in one pass and return the payload.
    payload = ReportSerializer(Report.objects.all(), many=True).data
    return Response(payload)
@extend_schema(responses=ReportCalculationSerializer)
@api_view(['GET'])
def report(request, uuid):
    """Retrieve tax data for a specific report."""
    # 404 early when the uuid is unknown, then serialize the single match.
    matched = get_object_or_404(Report, uuid=uuid)
    return Response(ReportCalculationSerializer(matched).data)
@extend_schema(
    request={
        'multipart/form-data': {
            'type': 'object',
            'properties': {
                'file': {
                    'type': 'file',
                    'format': 'binary'
                },
                'name': {
                    'type': 'string',
                }
            },
            'required': ['file', 'name']
        }
    },
    responses={201: ReportSerializer}
)
@api_view(['POST'])
def transactions(request):
    """Post a new .csv file with transactions made.

    Expects multipart form data with a ``file`` (CSV of
    date/transaction_type/amount/memo rows) and a ``name`` for the report.
    Returns the created report (201) or a 400 with an ``error`` message.
    """
    column_names = ['date', 'transaction_type', 'amount', 'memo']
    uploaded_file = request.FILES.get('file')
    # This is the most vulnerable part of the code, user input can be
    # amazingly bad sometimes.
    # BUG FIX: .get('file') returns None when no 'file' part was sent;
    # previously that crashed pd.read_csv later (HTTP 500). Reject as 400.
    if uploaded_file is None:
        return Response({"error": "No file uploaded. Please provide a CSV file."},
                        status=status.HTTP_400_BAD_REQUEST)
    # Check if the uploaded file is empty
    if isinstance(uploaded_file, InMemoryUploadedFile) and uploaded_file.size == 0:
        return Response({"error": "Empty file. Please provide a file with content."},
                        status=status.HTTP_400_BAD_REQUEST)
    # Try reading the file using pandas to check its format.
    # EmptyDataError covers empty uploads that arrive as temp files, which
    # the InMemoryUploadedFile check above does not catch.
    try:
        df = pd.read_csv(uploaded_file, names=column_names, header=0)
    except (pd.errors.ParserError, pd.errors.EmptyDataError):
        return Response({"error": "Invalid file format. Please provide a valid CSV file."},
                        status=status.HTTP_400_BAD_REQUEST)
    # Drop incomplete rows rather than rejecting the whole upload.
    df = df.dropna()
    report_data = {
        'name': request.data.get('name'),
        'uuid': uuid4(),
        'transactions': df.to_dict('records')
    }
    report_serializer = ReportSerializer(data=report_data)
    report_serializer.is_valid(raise_exception=True)  # 400 on schema errors
    report_serializer.save()
    return Response(report_serializer.data, status=status.HTTP_201_CREATED)
| StefKal/superdupertax | superdupertax/calculator/views.py | views.py | py | 2,749 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "calculator.models.Report.objects.all",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "calculator.models.Report.objects",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "calculator.models.Report",
"line_number": 18,
"usage_type": "n... |
8086245877 | #!/usr/bin/env python
"""
The file contains the class and methods for loading and aligning datasets
"""
from __future__ import print_function, division
import pickle
import numpy as np
from scipy.io import loadmat
import pandas as pd
from .utils import p2fa_phonemes
import warnings
from collections import OrderedDict
from copy import deepcopy
import sys
__author__ = "Prateek Vij"
__copyright__ = "Copyright 2017, Carnegie Mellon University"
__credits__ = ["Amir Zadeh", "Prateek Vij", "Soujanya Poria"]
__license__ = "GPL"
__version__ = "1.0.1"
__status__ = "Production"
# Python 2 has no FileNotFoundError; alias it to IOError so the except
# clauses in this module work on both interpreter lines.
if sys.version_info[0] < 3:
    FileNotFoundError = IOError
class Dataset(object):
    """Primary class for loading and aligning dataset features.

    A dataset is described either by a CSV manifest (row 0: modality types,
    row 1: levels 's'/'v', data rows: video id, segment id, start, end, one
    feature-file path per modality) or by a previously pickled feature
    dictionary.
    """

    def __init__(self, dataset_file='', stored=False):
        """
        Initialise the Dataset class. Supports two loading mechanisms -
        from dataset feature files and from a pickle file, decided by the
        param stored.

        :param stored: True if loading from pickle, False if loading from
                       dataset feature files. Default False.
        :param dataset_file: Filepath to the file required to load dataset
                             features. CSV or pickle file depending upon
                             the loading mechanism.
        """
        self.feature_dict = None
        # Fixed: relative timestamps are no longer supported.
        self.timestamps = 'absolute'
        self.stored = stored
        self.dataset_file = dataset_file
        self.phoneme_dict = p2fa_phonemes
        self.loaded = False

    def __getitem__(self, key):
        """Adding direct access of internal data."""
        return self.feature_dict[key]

    def keys(self):
        """Wrapper for .keys() of the feature_dict."""
        return self.feature_dict.keys()

    def items(self):
        """Wrapper for .items() of the feature_dict."""
        return self.feature_dict.items()

    def load(self):
        """
        Load the feature dictionary for the input dataset.

        :returns: Dictionary of features for the dataset with each modality
                  as dictionary key.
        """
        if self.stored:
            self.dataset_pickle = self.dataset_file
            # BUG FIX: pickle streams are binary; the original opened the
            # file in text mode (TypeError on Python 3) and leaked the
            # handle.  Open in 'rb' inside a context manager.
            with open(self.dataset_pickle, 'rb') as handle:
                self.feature_dict = pickle.load(handle)
            return self.feature_dict
        # Load the feature dictionary from the dataset manifest files.
        self.dataset_csv = self.dataset_file
        self.feature_dict = self.controller()
        self.loaded = True
        return self.feature_dict

    def controller(self):
        """
        Validate the dataset csv file and load the features for the dataset
        from its feature files.
        """
        def validate_file(self):
            # Parse the manifest: row 0 holds modality types, row 1 the
            # levels ('s' segment / 'v' video); data rows start at index 2.
            data = pd.read_csv(self.dataset_csv, header=None)
            data = np.asarray(data)
            self.dataset_info = {}
            modality_count = len(data[0]) - 4
            self.modalities = {}
            for i in range(modality_count):
                key = str(data[0][i + 4])
                info = {}
                info["level"] = str(data[1][i + 4])
                info["type"] = str(data[0][i + 4])
                self.modalities[key] = info
            for record in data[2:]:
                video_id = str(record[0])
                segment_id = str(record[1])
                if video_id not in self.dataset_info:
                    self.dataset_info[video_id] = {}
                if segment_id in self.dataset_info[video_id]:
                    raise NameError("Multiple instances of segment "
                                    + segment_id + " for video " + video_id)
                segment_data = {}
                segment_data["start"] = float(record[2])
                segment_data["end"] = float(record[3])
                for i in range(modality_count):
                    key = str(data[0][i + 4])
                    segment_data[key] = str(record[i + 4])
                self.dataset_info[video_id][segment_id] = segment_data
            return

        def load_features(self):
            feat_dict = {}
            data = self.dataset_info
            modalities = self.modalities
            for key, value in modalities.items():
                api = value['type']
                level = value['level']
                # Dispatch to the matching load_<type> method by name.
                loader_method = Dataset.__dict__["load_" + api]
                modality_feats = {}
                print("Loading features for", api)
                for video_id, video_data in data.items():
                    video_feats = {}
                    for segment_id, segment_data in video_data.items():
                        filepath = str(segment_data[key])
                        start = segment_data["start"]
                        end = segment_data["end"]
                        video_feats[segment_id] = loader_method(
                            self, filepath, start, end,
                            timestamps=self.timestamps, level=level)
                    modality_feats[video_id] = video_feats
                modality_feats = OrderedDict(
                    sorted(modality_feats.items(), key=lambda x: x[0]))
                feat_dict[key] = modality_feats
            return feat_dict

        validate_file(self)
        feat_dict = load_features(self)
        return feat_dict

    def load_opensmile(self, filepath, start, end, timestamps='absolute', level='s'):
        """
        Load OpenSmile features for the interval (start, end).

        :param level: 's' if the file holds features for the segment only;
                      OpenSmile features exist per segment/video only, so
                      any other level with start != 0.0 returns None.
        :returns: List of (feat_start, feat_end, feat_value) tuples.
        """
        features = []
        start_time, end_time = start, end
        if timestamps == 'relative':
            start_time = 0.0
            end_time = end - start
        if level == 's' or start == 0.0:
            # The last line of the file holds the aggregated feature vector.
            with open(filepath) as f_handle:
                feats = f_handle.readlines()[-1].strip().split(',')[1:]
            feats = [float(feat_val) for feat_val in feats]
            feat_val = np.asarray(feats, dtype=np.float32)
            features.append((start_time, end_time, feat_val))
        else:
            print("Opensmile support features for the entire segment")
            return None
        return features

    def load_covarep(self, filepath, start, end, timestamps='absolute', level='s'):
        """
        Load COVAREP features for the interval (start, end).

        :returns: List of (feat_start, feat_end, feat_value) tuples; an
                  empty list when the .mat file cannot be loaded.
        """
        features = []
        time_period = 0.01  # COVAREP emits one frame every 10 ms
        try:
            f_content = loadmat(filepath)
        except (FileNotFoundError, TypeError, ValueError):
            # loadmat raises differently for "XXX" vs "XXX.mat"; treat any
            # of these as a missing feature file.
            print(".mat file cannot load at {}!".format(filepath))
            return []
        feats = f_content['features']
        start_time, end_time = start, end
        if timestamps == "relative":
            start_time, end_time = 0.0, end - start
        if level == 's':
            feat_start = start_time
            for feat in feats:
                feat_end = feat_start + time_period
                feat_val = np.asarray(feat)
                features.append((max(feat_start - start_time, 0),
                                 max(feat_end - start_time, 0), feat_val))
                feat_start += time_period
        else:
            # Video-level file: slice the frame range covering the segment.
            feat_count = feats.shape[0]
            start_index = int(min((start / time_period), feat_count))
            end_index = int(min((end / time_period), feat_count))
            feat_start = start_time
            for feat in feats[start_index:end_index]:
                feat_end = feat_start + time_period
                feat_val = np.asarray(feat)
                features.append((max(feat_start - start_time, 0),
                                 max(feat_end - start_time, 0), feat_val))
                feat_start += time_period
        return features

    def _load_span_features(self, filepath, start, end, level, parse_val):
        """
        Shared reader for "start,end,value..." CSV feature files (used by
        load_phonemes, load_embeddings and load_words, whose bodies were
        previously near-identical copies).

        :param parse_val: callable mapping the split fields of a
                          segment-level line to a feature value; returning
                          None skips the line.
        :returns: List of (feat_start, feat_end, feat_value) tuples.
        """
        features = []
        if level == 's':
            with open(filepath, 'r') as f_handle:
                for line in f_handle.readlines():
                    line = line.strip()
                    if not line:
                        break  # stop at the first blank line
                    fields = line.split(",")
                    feat_val = parse_val(fields)
                    if feat_val is None:
                        continue
                    feat_start = float(fields[0])
                    feat_end = float(fields[1])
                    features.append((max(feat_start, 0), max(feat_end, 0),
                                     feat_val))
        else:
            with open(filepath, 'r') as f_handle:
                for line in f_handle.readlines():
                    line = line.strip()
                    if not line:
                        break
                    fields = line.split(",")
                    feat_start = float(fields[0])
                    feat_end = float(fields[1])
                    feat_time = feat_end - feat_start
                    # Keep a feature if it covers the segment, lies inside
                    # it, or overlaps it by more than half its duration.
                    if ((feat_start <= start and feat_end > end)
                            or (feat_start >= start and feat_end < end)
                            or (feat_start <= start
                                and start - feat_start < feat_time / 2)
                            or (feat_start >= start
                                and end - feat_start > feat_time / 2)):
                        feat_val = np.asarray([float(v) for v in fields[2:]])
                        features.append((max(feat_start - start, 0),
                                         max(feat_end - start, 0), feat_val))
        return features

    @staticmethod
    def _parse_float_vector(fields):
        """Turn the value columns of a feature line into a float ndarray."""
        return np.asarray([float(val) for val in fields[2:]])

    def load_phonemes(self, filepath, start, end, timestamps='relative', level='s'):
        """
        Load P2FA phonemes as features from the file at *filepath*.

        :param start: Start time of the segment.
        :param end: End time of the segment.
        :param level: 's' for segment-level files, 'v' for video-level.
        :param timestamps: kept for signature compatibility; the returned
                           times are already segment-relative.
        :returns: List of (feat_start, feat_end, feat_value) tuples.
        """
        return self._load_span_features(filepath, start, end, level,
                                        self._parse_float_vector)

    def load_embeddings(self, filepath, start, end, timestamps='relative', level='s'):
        """
        Load word embeddings from the file at *filepath*; same file format
        and semantics as :meth:`load_phonemes`.

        :returns: List of (feat_start, feat_end, feat_value) tuples.
        """
        return self._load_span_features(filepath, start, end, level,
                                        self._parse_float_vector)

    def load_words(self, filepath, start, end, timestamps='relative', level='s'):
        """
        Load one-hot word ids as features from the file at *filepath*.
        Segment-level lines carry a single integer word id; video-level
        lines are parsed as float vectors like the other loaders.

        :returns: List of (feat_start, feat_end, feat_value) tuples.
        """
        def parse_word(fields):
            try:
                return int(fields[2])
            except (ValueError, IndexError):
                # BUG FIX: the original printed here and then appended a
                # stale (or, on the first line, undefined) value; skip the
                # malformed line instead.
                print(filepath, start, end)
                return None

        return self._load_span_features(filepath, start, end, level,
                                        parse_word)

    def load_openface(self, filepath, start, end, timestamps='absolute', level='s'):
        """
        Load OpenFace features from the file at *filepath*.

        :param level: 's' if the file covers only (start, end), 'v' if it
                      covers the whole video (rows are filtered to the
                      segment and shifted to segment-relative time).
        :returns: List of (feat_start, feat_end, feat_value) tuples.
        """
        features = []
        time_period = 0.0333333  # OpenFace frame step (~30 fps)
        if level == 's':
            with open(filepath, 'r') as f_handle:
                for line in f_handle.readlines()[1:]:  # skip the header row
                    line = line.strip()
                    if not line:
                        break
                    fields = line.split(",")
                    feat_start = float(fields[1])
                    feat_end = feat_start + time_period
                    feat_val = np.asarray([float(v) for v in fields[2:]],
                                          dtype=np.float32)
                    features.append((max(feat_start, 0), max(feat_end, 0),
                                     feat_val))
        else:
            with open(filepath, 'r') as f_handle:
                for line in f_handle.readlines()[1:]:
                    line = line.strip()
                    if not line:
                        break
                    fields = line.split(",")
                    feat_start = float(fields[1])
                    if start <= feat_start < end:
                        # To adjust the timestamps to segment-relative time.
                        feat_start = feat_start - start
                        feat_end = feat_start + time_period
                        feat_val = np.asarray([float(v) for v in fields[2:]],
                                              dtype=np.float32)
                        features.append((max(feat_start, 0),
                                         max(feat_end, 0), feat_val))
        return features

    @staticmethod
    def _facet_values(fields):
        """Parse FACET value columns, substituting 0.0 for blanks/NaNs."""
        vals = []
        for val in fields:
            try:
                vals.append(float(val))
            except ValueError:
                vals.append(0.0)
        return np.asarray(vals, dtype=np.float32)

    # note that this is implicitly new facet
    def load_facet(self, filepath, start, end, timestamps='absolute', level='v'):
        """
        Load FACET features from the file at *filepath*.  The file format
        is sniffed: an optional header row and an optional leading
        frame-index column are detected from the first few rows.

        :returns: List of (feat_start, feat_end, feat_value) tuples.
        """
        features = []
        start_row = 0
        start_col = 0
        with open(filepath, 'r') as f_handle:
            splitted = []
            for line in f_handle.readlines()[0:10]:
                splitted.append(line.split(","))
            # A header row is present if the first field is not numeric.
            try:
                float(splitted[start_row][start_col])
            except (ValueError, IndexError):
                start_row = 1
            # An index column is present if column 0 increments by exactly
            # 1 on every sampled row.
            for i in range(1, len(splitted) - 1):
                if (float(splitted[i + 1][0]) - float(splitted[i][0])) != 1:
                    start_col = 0
                    break
                start_col = 1
        time_period = float(splitted[start_row][start_col])
        if level == 's':
            with open(filepath, 'r') as f_handle:
                for line in f_handle.readlines()[start_row:]:
                    line = line.strip()
                    if not line:
                        break
                    fields = line.split(",")
                    feat_start = float(fields[start_col])
                    feat_end = feat_start + time_period
                    feat_val = self._facet_values(fields[start_col + 1:-1])
                    features.append((max(feat_start, 0), max(feat_end, 0),
                                     feat_val))
        else:
            with open(filepath, 'r') as f_handle:
                for line in f_handle.readlines()[start_row:]:
                    line = line.strip()
                    if not line:
                        break
                    fields = line.split(",")
                    feat_start = float(fields[start_col])
                    if start <= feat_start < end:
                        # To adjust the timestamps to segment-relative time.
                        feat_start = feat_start - start
                        feat_end = feat_start + time_period
                        feat_val = self._facet_values(
                            fields[start_col + 1:-1])
                        features.append((max(feat_start, 0),
                                         max(feat_end, 0), feat_val))
        return features

    def load_facet1(self, filepath, start, end, timestamps='absolute', level='v'):
        """FACET 1.x loader; same format, delegates to :meth:`load_facet`."""
        return self.load_facet(filepath, start, end, timestamps=timestamps,
                               level=level)

    def load_facet2(self, filepath, start, end, timestamps='absolute', level='v'):
        """FACET 2.x loader; same format, delegates to :meth:`load_facet`."""
        return self.load_facet(filepath, start, end, timestamps=timestamps,
                               level=level)

    def align(self, align_modality):
        """
        Align every other modality onto the feature intervals of
        *align_modality*.

        :returns: dict mapping each modality to an OrderedDict of aligned
                  per-video features; also stored as
                  ``self.aligned_feature_dict``.
        """
        aligned_feat_dict = {}
        modalities = self.modalities
        alignments = self.get_alignments(align_modality)
        for modality in modalities:
            if modality == align_modality:
                # The reference modality passes through unchanged.
                aligned_feat_dict[modality] = OrderedDict(
                    sorted(self.feature_dict[modality].items(),
                           key=lambda x: x[0]))
                continue
            aligned_modality = self.align_modality(modality, alignments)
            aligned_feat_dict[modality] = OrderedDict(
                sorted(aligned_modality.items(), key=lambda x: x[0]))
        self.aligned_feature_dict = aligned_feat_dict
        return aligned_feat_dict

    def get_alignments(self, modality):
        """Collect the (start, end) interval lists of *modality* for every
        video/segment; used as the reference grid for alignment."""
        alignments = {}
        aligned_feat_dict = self.feature_dict[modality]
        for video_id, segments in aligned_feat_dict.items():
            segment_alignments = {}
            for segment_id, features in segments.items():
                segment_alignments[segment_id] = []
                for value in features:
                    timing = (value[0], value[1])
                    segment_alignments[segment_id].append(timing)
            alignments[video_id] = segment_alignments
        return alignments

    def align_modality(self, modality, alignments, merge_type="mean"):
        """
        Re-bucket the features of *modality* onto the reference intervals
        in *alignments*, computing a duration-weighted mean of the features
        overlapping each interval.  Missing segment data is replaced by
        zeros and a warning is printed once per (video, segment).
        """
        aligned_feat_dict = {}
        modality_feat_dict = self.feature_dict[modality]
        warning_hist = set()  # (video_id, segment_id) already warned about
        for video_id, segments in alignments.items():
            aligned_video_feats = {}
            for segment_id, feat_intervals in segments.items():
                aligned_segment_feat = []
                for start_interval, end_interval in feat_intervals:
                    time_interval = end_interval - start_interval
                    feats = modality_feat_dict[video_id][segment_id]
                    try:
                        if modality == "words":
                            # word features are scalar ids; derive a zero
                            # of the same type
                            aligned_feat = feats[0][2] - feats[0][2]
                        else:
                            aligned_feat = np.zeros(len(feats[0][2]))
                    except (IndexError, TypeError):
                        if (video_id, segment_id) not in warning_hist:
                            print("\nModality {} for video {} segment {} is (partially) missing and is thus being replaced by zeros!\n".format(modality.split("_")[-1], video_id, segment_id))
                            warning_hist.add((video_id, segment_id))
                        # Borrow any non-empty segment of the same video to
                        # infer the feature dimensionality.
                        for sid, seg_data in modality_feat_dict[video_id].items():
                            if seg_data != []:
                                feats = seg_data
                                break
                        try:
                            if modality == "words":
                                aligned_feat = feats[0][2] - feats[0][2]
                            else:
                                aligned_feat = np.zeros(len(feats[0][2]))
                        except (IndexError, TypeError):
                            if modality == "words":
                                aligned_feat = 0
                            else:
                                aligned_feat = np.zeros(0)
                    for feat_tuple in feats:
                        feat_start = feat_tuple[0]
                        feat_end = feat_tuple[1]
                        feat_val = feat_tuple[2]
                        if (feat_start < end_interval
                                and feat_end >= start_interval):
                            # Weight by the fraction of the reference
                            # interval this feature covers.
                            feat_weight = (min(end_interval, feat_end) -
                                           max(start_interval, feat_start)) / time_interval
                            weighted_feat = np.multiply(feat_val, feat_weight)
                            if np.shape(aligned_feat) == (0,):
                                aligned_feat = weighted_feat
                            else:
                                aligned_feat = np.add(aligned_feat,
                                                      weighted_feat)
                    aligned_feat_tuple = (start_interval, end_interval,
                                          aligned_feat)
                    aligned_segment_feat.append(aligned_feat_tuple)
                aligned_video_feats[segment_id] = aligned_segment_feat
            aligned_feat_dict[video_id] = aligned_video_feats
        return aligned_feat_dict

    @staticmethod
    def merge(dataset1, dataset2):
        """Merge two Datasets' modalities and feature dicts into a new
        Dataset; entries from *dataset2* win on key collisions."""
        # ensure the merged objects are indeed Datasets
        assert isinstance(dataset1, Dataset)
        assert isinstance(dataset2, Dataset)
        # merge the feature_dict and modalities attributes
        merged_modalities = Dataset.merge_dict(dataset1.modalities,
                                               dataset2.modalities)
        merged_feat_dict = Dataset.merge_dict(dataset1.feature_dict,
                                              dataset2.feature_dict)
        mergedDataset = Dataset()
        mergedDataset.feature_dict = merged_feat_dict
        mergedDataset.modalities = merged_modalities
        return mergedDataset

    @staticmethod
    def merge_dict(dict1, dict2):
        """Deep-copy *dict1* and overlay *dict2*'s entries on top of it."""
        merged = deepcopy(dict1)
        merged.update(dict2)
        return merged
| codeislife99/Multimodal_Emotion_Analysis | mmdata/dataset.py | dataset.py | py | 29,605 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "sys.version_info",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "utils.p2fa_phonemes",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "pickle.load",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv... |
34594770015 | """OpenAPI schema utility functions."""
from io import StringIO
# Fallback example values per JSON-Schema primitive type, used whenever a
# schema declares no explicit "example".
_DEFAULT_EXAMPLES = {
    "string": "string",
    "integer": 1,
    "number": 1.0,
    "boolean": True,
    "array": [],
}

# More specific placeholders for "string" schemas that declare a "format".
_DEFAULT_STRING_EXAMPLES = {
    "date": "2020-01-01",
    "date-time": "2020-01-01T01:01:01Z",
    "password": "********",
    "byte": "QG1pY2hhZWxncmFoYW1ldmFucw==",
    "ipv4": "127.0.0.1",
    "ipv6": "::1",
}


def example_from_schema(schema):
    """
    Generates an example request/response body from the provided schema.

    >>> schema = {
    ...     "type": "object",
    ...     "required": ["id", "name"],
    ...     "properties": {
    ...         "id": {
    ...             "type": "integer",
    ...             "format": "int64"
    ...         },
    ...         "name": {
    ...             "type": "string",
    ...             "example": "John Smith"
    ...         },
    ...         "tag": {
    ...             "type": "string"
    ...         }
    ...     }
    ... }
    >>> example = example_from_schema(schema)
    >>> assert example == {
    ...     "id": 1,
    ...     "name": "John Smith",
    ...     "tag": "string"
    ... }
    """
    # If an example was provided then we use that
    if "example" in schema:
        return schema["example"]
    elif "oneOf" in schema:
        return example_from_schema(schema["oneOf"][0])
    elif "anyOf" in schema:
        return example_from_schema(schema["anyOf"][0])
    elif "allOf" in schema:
        # Combine schema examples
        example = {}
        for sub_schema in schema["allOf"]:
            example.update(example_from_schema(sub_schema))
        return example
    elif "enum" in schema:
        return schema["enum"][0]
    elif "type" not in schema:
        # Any type
        return _DEFAULT_EXAMPLES["integer"]
    elif schema["type"] == "object" or "properties" in schema:
        example = {}
        for prop, prop_schema in schema.get("properties", {}).items():
            example[prop] = example_from_schema(prop_schema)
        return example
    elif schema["type"] == "array":
        items = schema["items"]
        min_length = schema.get("minItems", 0)
        max_length = schema.get("maxItems", max(min_length, 2))
        assert min_length <= max_length
        # Try generate at least 2 example array items
        gen_length = min(2, max_length) if min_length <= 2 else min_length
        example_items = []
        if items == {}:
            # Any-type arrays
            example_items.extend(_DEFAULT_EXAMPLES.values())
        elif isinstance(items, dict) and "oneOf" in items:
            # Mixed-type arrays.
            # BUG FIX: the original did
            # _DEFAULT_EXAMPLES[sorted(items["oneOf"])[0]], but "oneOf"
            # holds schema dicts, which are neither orderable nor hashable,
            # so this branch always raised TypeError.  Build the example
            # from the first alternative, mirroring the top-level "oneOf"
            # handling above.
            example_items.append(example_from_schema(items["oneOf"][0]))
        else:
            example_items.append(example_from_schema(items))
        # Generate array containing example_items and satisfying min_length and max_length
        return [example_items[i % len(example_items)] for i in range(gen_length)]
    elif schema["type"] == "string":
        example_string = _DEFAULT_STRING_EXAMPLES.get(
            schema.get("format", None), _DEFAULT_EXAMPLES["string"]
        )
        min_length = schema.get("minLength", 0)
        max_length = schema.get("maxLength", max(min_length, len(example_string)))
        gen_length = (
            min(len(example_string), max_length)
            if min_length <= len(example_string)
            else min_length
        )
        assert 0 <= min_length <= max_length
        if min_length <= len(example_string) <= max_length:
            return example_string
        else:
            # Repeat/truncate the template until the length constraints hold.
            example_builder = StringIO()
            for i in range(gen_length):
                example_builder.write(example_string[i % len(example_string)])
            example_builder.seek(0)
            return example_builder.read()
    elif schema["type"] in ("integer", "number"):
        example = _DEFAULT_EXAMPLES[schema["type"]]
        if "minimum" in schema and "maximum" in schema:
            # Take average
            example = schema["minimum"] + (schema["maximum"] - schema["minimum"]) / 2
        elif "minimum" in schema and example <= schema["minimum"]:
            example = schema["minimum"] + 1
        elif "maximum" in schema and example >= schema["maximum"]:
            example = schema["maximum"] - 1
        return float(example) if schema["type"] == "number" else int(example)
    else:
        return _DEFAULT_EXAMPLES[schema["type"]]
| sphinx-contrib/openapi | sphinxcontrib/openapi/schema_utils.py | schema_utils.py | py | 4,446 | python | en | code | 103 | github-code | 36 | [
{
"api_name": "io.StringIO",
"line_number": 119,
"usage_type": "call"
}
] |
33006855107 | import csv
import io
from nltk.tokenize import word_tokenize
import sys
# Python 2 only: reload() re-exposes sys.setdefaultencoding (removed from
# the module namespace by site.py) so the default codec can be overridden
# for the Latin-1 tweet dump processed below.
reload(sys)
sys.setdefaultencoding('ISO-8859-1')
def findLowest(topWords):
    """Return the key of *topWords* whose count is smallest.

    BUG FIX: the original seeded the search with ``topWords.keys()[0]``,
    which raises TypeError on Python 3 where dict views are not indexable.
    ``min`` with a key function is equivalent on both Python 2 and 3 and
    keeps the same tie-breaking (the first key in iteration order whose
    count equals the minimum wins).
    """
    return min(topWords, key=topWords.get)
# NOTE(review): this script targets Python 2 (reader.next(), unicode());
# it will not run unmodified on Python 3.
# Stream the tweet dump and keep a rolling set of the most frequent tokens.
with io.open("old_tweets.csv", encoding = "ISO-8859-1") as csvFile:
    #fieldnames = ['username', 'date', 'text', 'id']
    #reader = csv.DictReader(csvFile, fieldnames = fieldnames)
    reader = csv.reader(csvFile, delimiter = ';')
    reader.next()  # skip the header row
    i = 0;
    counter = {}        # word -> total frequency over all tweets
    topWords = {}       # rolling top-N words by frequency (capped below)
    topsLowest = ""     # word currently holding the lowest count in topWords
    topMaxAllowed = 2000
    for row in reader:
        # Column 2 of each ';'-separated row holds the tweet text.
        for word in word_tokenize(row[2]):
            counter[word] = counter.get(word, 0) + 1
            if(len(topWords) < topMaxAllowed):
                topWords[word] = counter[word]
                # NOTE(review): hasattr() checks object attributes, not dict
                # keys, so the first operand is effectively always True for
                # string tokens; this looks like it was meant to be
                # "topsLowest not in topWords" — confirm intent.
                if((not hasattr(topWords, topsLowest)) or topWords[topsLowest] > counter[word]):
                    topsLowest = word
            elif (topWords[topsLowest] < counter[word]):
                # Evict the current minimum and admit the more frequent word.
                del topWords[topsLowest]
                topWords[word] = counter[word]
                topsLowest = findLowest(topWords)
# Write the surviving words and their counts as a ';'-separated table.
with io.open("word_count.csv", mode="w", encoding="ISO-8859-1") as outputFile:
    outputFile.write(unicode("word;frequency"))
    for word in topWords:
        outputFile.write(unicode("\n%s;%d" % (word, topWords[word])))
| Temirlan97/WhatTwitterFeels | wordBag/countWords.py | countWords.py | py | 1,230 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.setdefaultencoding",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "io.open",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "nltk.tokenize.word_tokenize"... |
25615151962 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import pandas as pd
from datetime import datetime
from txt_table import txt_table
import re
# 将txt文件转化为csv
def transfer_data(folder):
    """Convert the .txt data files under *folder* to CSV and merge them.

    Recursively walks *folder*; every ``.txt``/``.TXT`` file whose name
    does not contain "readme" is converted with ``txt_table`` and written
    next to the source with a ``.csv`` extension.  All converted files are
    then concatenated into ``<stem>.csv`` inside *folder*, where ``<stem>``
    is taken from the first ``*_readme`` file found.

    :param folder: root directory to scan.
    :returns: path of the merged CSV file.
    :raises RuntimeError: if no ``*_readme`` file exists, since its stem
        names the merged output file (the original crashed with a bare
        IndexError in that case).
    """
    all_paths = []   # every file encountered during the walk
    csv_paths = []   # CSV files produced from txt sources
    stems = []       # output-file stems parsed from *_readme names
    # BUG FIX: the original reused the name `files` for both the os.walk
    # generator and the per-directory file list; use distinct names.
    for root, dirs, names in os.walk(folder):
        for name in names:
            if "readme" in name:
                # Derive the merged-output stem from "<stem>_readme...".
                # Guarded: re.match(...) is None when the name contains
                # "readme" without the "_readme" pattern (original raised
                # AttributeError there).
                matched = re.match("(.*)_readme", name)
                if matched:
                    stems.append(matched.group(1))
            all_paths.append(os.path.join(root, name))
    for path in all_paths:
        try:
            if path.endswith((".txt", ".TXT")) and "readme" not in path:
                out_path = path[:-4] + ".csv"
                txt_table(path).to_csv(out_path, sep=',', encoding='utf-8')
                csv_paths.append(out_path)
        except Exception as exc:
            # BUG FIX: the original bare except printed only 'error!',
            # hiding which file failed and why; keep the best-effort
            # behaviour but report the cause.
            print('error!', path, exc)
            continue
    print('已将txt文件转化为csv!')  # "txt files converted to csv"
    if not stems:
        raise RuntimeError(
            "no *_readme file found under {!r}; cannot name the merged "
            "output".format(folder))
    merged_path = os.path.join(folder, stems[0] + '.csv')
    # Read every converted file once and concatenate in a single call
    # instead of the original incremental pd.concat in a loop.
    frames = [pd.read_csv(p, sep=',', encoding='utf-8') for p in csv_paths]
    merged = pd.concat(frames) if frames else pd.DataFrame()
    merged.to_csv(merged_path, sep=',', encoding='utf-8')
    print(merged.head())
    print("已完成数据合并!")  # "data merge finished"
    return merged_path
# 提取相关数据
def extract_data(filecsv,jiwens,file_station):
# 读取气象数据,这一步要提取有用的气象数据并求平均值
datas = pd.read_csv(filecsv, sep=',', encoding='utf-8')
# 提取列名
datacolumns= list(datas.columns)[2:]
del datacolumns[2] # 删除日期
#print(datacolumns)
datacolumnslist = [list(datas[i]) for i in datacolumns]
#print(datacolumnslist[0])
# shidu_ave = list(datas[u'V13003_701'])
file_n = filecsv.replace(".csv", "_N.csv")
print(file_n)
stations_china = list(pd.read_excel(file_station, sheet_name='Sheet1')[u'区站号'])
lng = list(pd.read_excel(file_station, sheet_name='Sheet1')[u'经度'])
lat = list(pd.read_excel(file_station, sheet_name='Sheet1')[u'纬度'])
# 建立站点数据列表
qixiangday=['stations_n','timeavailas','lngs','lats','station_n_tem_ave','station_n_tem_ave_max','station_n_tem_ave_min',
'station_n_shuiqiya_ave','station_n_jiangshui_20','station_n_jiangshui_08','station_n_fengsu_ave']
qixiangdaylist=[[] for i in range(len(qixiangday))]
#print(qixiangdaylist)
station_n_tem_sum = [[] for i in range(len(jiwens))]
for i in range(0, len(stations_china)):
# 临时列表
qixiangtemp = ['station_n','timeavaila', 'lng_n', 'lat_n', 'station_tem_ave', 'station_tem_ave_max','station_tem_ave_min',
'station_shuiqiya_ave', 'station_jiangshui_20', 'station_jiangshui_08','station_fengsu_ave']
qixiangtemplist=[[] for i in range(len(qixiangtemp))]
# 符合条件则建立列表
for j in range(0, len(datacolumnslist[0])):
if datacolumnslist[0][j] == stations_china[i]:
print(datacolumnslist[0][j])
qixiangtemplist[0].append(datacolumnslist[0][j]) # 区站号
qixiangtemplist[1].append(datacolumnslist[1][j]) # 有效时段
qixiangtemplist[2].append(lng[i]) # 经度
qixiangtemplist[3].append(lat[i]) # 纬度
qixiangtemplist[4].append(datacolumnslist[2][j]) # 累年日平均气温
qixiangtemplist[5].append(datacolumnslist[3][j]) # 累年平均日最高气温
qixiangtemplist[6].append(datacolumnslist[4][j]) # 累年平均日最低气温
qixiangtemplist[7].append(datacolumnslist[5][j]) # 累年日平均水汽压
qixiangtemplist[8].append(datacolumnslist[6][j]) # 累年20-20时日降水量
qixiangtemplist[9].append(datacolumnslist[7][j]) # 累年08-08时日降水量
qixiangtemplist[10].append(datacolumnslist[8][j]) # 累年日平均风速
#print(qixiangtemplist[9])
if len(qixiangtemplist[4]) != 0:
#print(qixiangtemplist[1])
qixiangdaylist[0].append(qixiangtemplist[0][0]) # 区站号
qixiangdaylist[1].append(str(qixiangtemplist[1][0])) # 有效时段
qixiangdaylist[2].append(qixiangtemplist[2][0]) # 经度
qixiangdaylist[3].append(qixiangtemplist[3][0]) # 纬度
# 求平均值
qixiangdaylist[4].append(sum(qixiangtemplist[4]) / len(qixiangtemplist[4])) # 累年日平均气温
qixiangdaylist[5].append(sum(qixiangtemplist[5]) / len(qixiangtemplist[5])) # 累年平均日最高气温
qixiangdaylist[6].append(sum(qixiangtemplist[6]) / len(qixiangtemplist[6])) # 累年平均日最低气温
qixiangdaylist[7].append(sum(qixiangtemplist[7]) / len(qixiangtemplist[9])) # 累年日平均水汽压
qixiangdaylist[8].append(sum(qixiangtemplist[8]) / len(qixiangtemplist[8])) # 累年20-20时日降水量
qixiangdaylist[9].append(sum(qixiangtemplist[9]) / len(qixiangtemplist[9])) # 累年08-08时日降水量
qixiangdaylist[10].append(sum(qixiangtemplist[10]) / len(qixiangtemplist[10])) # 累年日平均风速
# 求活动积温
for x in range(len(jiwens)):
tem_sum = []
for tem in qixiangtemplist[4]:
if tem > jiwens[x]:
tem_sum.append(tem)
else:
pass
station_n_tem_sum[x].append(sum(tem_sum))
#print(qixiangdaylist[10])
# 建立积温更新后的列表
for i in range(len(jiwens)):
qixiangday.append('jiwen%s' % jiwens[i])
qixiangdaylist.append(station_n_tem_sum[i])
dfs=pd.DataFrame(qixiangdaylist)
dfs=dfs.T
dfs.columns=qixiangday
print(dfs.head())
dfs.to_csv(file_n, sep=',', encoding='utf-8')
print('已完成气象数据提取!')
if __name__=='__main__':
time_start = datetime.now()
print('开始时间:' + str(time_start))
'''第一步,将txt文件转化为csv'''
folder = "D:\\Database\\02China\\04Qixiang\\510000\\"
#folder="C:\\Users\\jli\\Desktop\\AAA"
step1=input('是否进行文件转换:')
if int(step1)==0:
#filecsv=transfer_data(folder)
filecsv= "D:\\Database\\02China\\04Qixiang\\510000\\SURF_CHN_MUL_MDAY_19812010.csv"
'''第二步,提取每个站点的数据'''
step2=input("是否提取站点数据:")
if int(step2)==0:
file_station="D:\\Database\\02China\\04Qixiang\\SURF_CHN_MUL_STATION.xlsx"
jiwens=[0]
extract_data(filecsv, jiwens, file_station)
time_end = datetime.now()
print('结束时间:' + str(time_end))
time_last = time_end - time_start
print('用时' + str(time_last)) | hellboy1990/qixiang_explore | qixiang_check_v2.py | qixiang_check_v2.py | py | 7,538 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "os.walk",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 28,
"u... |
73692412265 | import copy
import mock
import testtools
from stackalytics.processor import default_data_processor
from stackalytics.processor import normalizer
from stackalytics.tests.unit import test_data
class TestDefaultDataProcessor(testtools.TestCase):
def setUp(self):
super(TestDefaultDataProcessor, self).setUp()
self.get_users = mock.Mock(return_value=[
test_data.USERS,
])
normalized_data = copy.deepcopy(test_data.DEFAULT_DATA)
normalizer.normalize_default_data(normalized_data)
def tearDown(self):
super(TestDefaultDataProcessor, self).tearDown()
def test_normalizer(self):
data = copy.deepcopy(test_data.DEFAULT_DATA)
normalizer.normalize_default_data(data)
self.assertIn('releases', data['repos'][0])
self.assertEqual([], data['repos'][0]['releases'],
message='Empty list of releases expected')
self.assertEqual(0, data['users'][0]['companies'][-1]['end_date'],
message='The last company end date should be 0')
self.assertIn('user_id', data['users'][0])
self.assertEqual(test_data.USERS[0]['launchpad_id'],
data['users'][0]['user_id'],
message='User id should be set')
# verify that *independent company is added automatically
self.assertEqual(3, len(data['users'][1]['companies']))
self.assertEqual(0, data['users'][1]['companies'][-1]['end_date'],
message='The last company end date should be 0')
def test_update_project_list(self):
with mock.patch('stackalytics.processor.default_data_processor.'
'_retrieve_project_list_from_gerrit') as retriever:
retriever.return_value = [
{'module': 'nova',
'uri': 'git://git.openstack.org/openstack/nova',
'organization': 'openstack'},
{'module': 'qa', 'uri': 'git://git.openstack.org/openstack/qa',
'has_gerrit': True,
'organization': 'openstack'},
]
dd = {
'repos': [
{'module': 'qa',
'uri': 'git://git.openstack.org/openstack/qa',
'organization': 'openstack'},
{'module': 'tux',
'uri': 'git://git.openstack.org/stackforge/tux',
'organization': 'stackforge'},
],
'project_sources': [{'organization': 'openstack',
'uri': 'gerrit://'}],
'module_groups': [],
}
default_data_processor._update_project_list(dd)
self.assertEqual(3, len(dd['repos']))
self.assertIn('qa', set([r['module'] for r in dd['repos']]))
self.assertIn('nova', set([r['module'] for r in dd['repos']]))
self.assertIn('tux', set([r['module'] for r in dd['repos']]))
self.assertIn('has_gerrit', dd['repos'][0])
self.assertNotIn('has_gerrit', dd['repos'][1])
self.assertNotIn('has_gerrit', dd['repos'][2])
self.assertEqual(2, len(dd['module_groups']))
self.assertIn({'id': 'openstack',
'module_group_name': 'openstack',
'modules': ['qa', 'nova'],
'tag': 'organization'}, dd['module_groups'])
self.assertIn({'id': 'stackforge',
'module_group_name': 'stackforge',
'modules': ['tux'],
'tag': 'organization'}, dd['module_groups'])
def test_update_project_list_ext_project_source(self):
with mock.patch('stackalytics.processor.default_data_processor.'
'_retrieve_project_list_from_github') as retriever:
retriever.return_value = [
{'module': 'kubernetes',
'uri': 'git://github.com/kubernetes/kubernetes',
'organization': 'kubernetes'},
]
dd = {
'repos': [],
'project_sources': [
{'organization': 'kubernetes',
'uri': 'github://',
'module_group_id': 'kubernetes-group'},
],
'module_groups': [],
}
default_data_processor._update_project_list(dd)
self.assertEqual(1, len(dd['repos']))
self.assertIn('kubernetes',
set([r['module'] for r in dd['repos']]))
self.assertEqual(1, len(dd['module_groups']))
self.assertIn({'id': 'kubernetes-group',
'module_group_name': 'kubernetes',
'modules': ['kubernetes'],
'tag': 'organization'}, dd['module_groups'])
@mock.patch('stackalytics.processor.utils.read_json_from_uri')
def test_update_with_driverlog(self, mock_read_from_json):
default_data = {'repos': [{'module': 'cinder', }], 'users': []}
driverlog_dd = {'drivers': [{
'project_id': 'openstack/cinder',
'vendor': 'VMware',
'name': 'VMware VMDK Driver',
'ci': {
'id': 'vmwareminesweeper',
'success_pattern': 'Build successful',
'failure_pattern': 'Build failed'
}
}]}
mock_read_from_json.return_value = driverlog_dd
default_data_processor._update_with_driverlog_data(default_data, 'uri')
expected_user = {
'user_id': 'ci:vmware_vmdk_driver',
'user_name': 'VMware VMDK Driver',
'static': True,
'companies': [
{'company_name': 'VMware', 'end_date': None}],
}
self.assertIn(expected_user, default_data['users'])
self.assertIn(driverlog_dd['drivers'][0],
default_data['repos'][0]['drivers'])
@mock.patch('stackalytics.processor.utils.read_json_from_uri')
def test_update_with_driverlog_specific_repo(self, mock_read_from_json):
default_data = {'repos': [{'module': 'fuel-plugin-mellanox', }],
'users': []}
driverlog_dd = {'drivers': [{
'project_id': 'openstack/fuel',
'repo': 'stackforge/fuel-plugin-mellanox',
'vendor': 'Mellanox',
'name': 'ConnectX-3 Pro Network Adapter Support plugin',
'ci': {
'id': 'mellanox',
'success_pattern': 'SUCCESS',
'failure_pattern': 'FAILURE'
}
}]}
mock_read_from_json.return_value = driverlog_dd
default_data_processor._update_with_driverlog_data(default_data, 'uri')
expected_user = {
'user_id': 'ci:connectx_3_pro_network_adapter_support_plugin',
'user_name': 'ConnectX-3 Pro Network Adapter Support plugin',
'static': True,
'companies': [
{'company_name': 'Mellanox', 'end_date': None}],
}
self.assertIn(expected_user, default_data['users'])
self.assertIn(driverlog_dd['drivers'][0],
default_data['repos'][0]['drivers'])
| Mirantis/stackalytics | stackalytics/tests/unit/test_default_data_processor.py | test_default_data_processor.py | py | 7,360 | python | en | code | 12 | github-code | 36 | [
{
"api_name": "testtools.TestCase",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "mock.Mock",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "stackalytics.tests.unit.test_data.USERS",
"line_number": 16,
"usage_type": "attribute"
},
{
"ap... |
40974261041 | #Import libraries
import numpy as np
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
#Database Connection
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
#Map database
Base = automap_base()
Base.prepare(engine, reflect = True)
#Reference known tables
measurement = Base.classes.measurement
station = Base.classes.station
#Set up flask
app = Flask(__name__)
#Flask Routes
@app.route("/")
def home():
return(
f"Available Routes:<br/>"
f"/api/v1.0/precipitation<br/>"
f"/api/v1.0/stations<br/>"
f"/api/v1.0/tobs<br/>"
f"/api/v1.0/[enter start date: yyyy-mm-dd] <br/>"
f"/api/v1.0/[enter start date: yyyy-mm-dd]/[enter end date: yyyy-mm-dd]"
)
@app.route("/api/v1.0/precipitation")
def precipitation():
#Create session & query
session = Session(engine)
results = session.query(measurement.date, measurement.prcp).\
filter(measurement.date > "2016-08-23").all()
session.close()
#Create dictionary and append to list
prcp_list = []
for date, prcp in results:
prcp_dict = {}
prcp_dict['date'] = date
prcp_dict['prcp'] = prcp
prcp_list.append(prcp_dict)
#Return json
return jsonify(prcp_list)
@app.route("/api/v1.0/stations")
def stations():
#Create session & query
session = Session(engine)
stations = session.query(station.station).all()
station_list = list(np.ravel(stations))
session.close()
#Return json
return jsonify(station_list)
@app.route("/api/v1.0/tobs")
def tobs():
#Create session & query
session = Session(engine)
tobs_results = session.query(measurement.station, measurement.date, measurement.tobs).\
filter(measurement.station == 'USC00519281').\
filter(measurement.date >'2016-08-23').all()
session.close()
#Create dictionary and append to list
tobs_list = []
for station, date, tobs in tobs_results:
tobs_dict = {}
tobs_dict['station'] = station
tobs_dict['date'] = date
tobs_dict['tobs'] = tobs
tobs_list.append(tobs_dict)
#Return json
return jsonify(tobs_list)
@app.route("/api/v1.0/<start_date>")
def Start_date(start_date):
#Create session & query
session = Session(engine)
start_results = session.query( func.avg(measurement.tobs), func.max(measurement.tobs), func.min(measurement.tobs)).\
filter(measurement.date >= start_date)
session.close()
#Create dictionary and append to list
tobs_start_list = []
for avg, max, min in start_results:
start_dict = {}
start_dict['avg'] = avg
start_dict['max'] = max
start_dict['min'] = min
tobs_start_list.append(start_dict)
#Return json
return jsonify(tobs_start_list)
@app.route("/api/v1.0/<start_date>/<end_date>")
def Start_end_date(start_date, end_date):
#Create session & query
session = Session(engine)
start_results = session.query( func.avg(measurement.tobs), func.max(measurement.tobs), func.min(measurement.tobs)).\
filter(measurement.date >= start_date).\
filter(measurement.date <= end_date)
session.close()
#Create dictionary and append to list
tobs_start_end_list = []
for avg, max, min in start_results:
start_end_dict = {}
start_end_dict['avg'] = avg
start_end_dict['max'] = max
start_end_dict['min'] = min
tobs_start_end_list.append(start_end_dict)
#Return json
return jsonify(tobs_start_end_list)
if __name__ == '__main__':
app.run(debug=True) | AJ-Paine/10-Hawaii-Temperature-Exploration | app.py | app.py | py | 3,710 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlalchemy.create_engine",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.ext.automap.automap_base",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 21,
"usage_type": "call"
},
{
"api_name... |
323607876 | """added Client Favourite and product views
Revision ID: bcc08ae9bed7
Revises: 399549c08a2a
Create Date: 2020-01-24 22:55:04.098191
"""
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
# revision identifiers, used by Alembic.
revision = 'bcc08ae9bed7'
down_revision = '399549c08a2a'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('client_favourite',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('product_id', sa.Integer(), nullable=False),
sa.Column('client_id', sa.Integer(), nullable=False),
sa.Column('created', sa.DateTime(), nullable=True),
sa.Column('updated', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['client_id'], ['client.id'], ),
sa.ForeignKeyConstraint(['product_id'], ['product.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('product_view',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('product_id', sa.Integer(), nullable=False),
sa.Column('client_id', sa.Integer(), nullable=True),
sa.Column('client_ip', sa.Integer(), nullable=False),
sa.Column('created', sa.DateTime(), nullable=True),
sa.Column('updated', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['client_id'], ['client.id'], ),
sa.ForeignKeyConstraint(['client_ip'], ['product.id'], ),
sa.ForeignKeyConstraint(['product_id'], ['product.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('product_view')
op.drop_table('client_favourite')
# ### end Alembic commands ###
| Dsthdragon/kizito_bookstore | migrations/versions/bcc08ae9bed7_added_client_favourite_and_product_views.py | bcc08ae9bed7_added_client_favourite_and_product_views.py | py | 1,741 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "alembic.op.create_table",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integ... |
20885862962 | # flake8: noqa
import nltk
nltk.download("brown")
nltk.download("names")
import numpy as np
import multiprocessing as mp
import string
import spacy
import os
os.system("python -m spacy download en_core_web_sm")
from sklearn.base import TransformerMixin, BaseEstimator
from normalise import normalise
import pandas as pd
class TextPreprocessor(BaseEstimator, TransformerMixin):
def __init__(self, variety="BrE", user_abbrevs={}, n_jobs=1):
"""
Text preprocessing transformer includes steps:
1. Text normalization
2. Punctuation removal
3. Stop words removal
4. Lemmatization
variety - format of date (AmE - american type, BrE - british format)
user_abbrevs - dict of user abbreviations mappings (from normalise package)
n_jobs - parallel jobs to run
"""
self.variety = variety
self.user_abbrevs = user_abbrevs
self.n_jobs = n_jobs
self.nlp = spacy.load("en_core_web_sm")
def fit(self, X, y=None):
return self
def transform(self, X, *_):
X_copy = X.copy()
partitions = 1
cores = mp.cpu_count()
if self.n_jobs <= -1:
partitions = cores
elif self.n_jobs <= 0:
return X_copy.apply(self._preprocess_text)
else:
partitions = min(self.n_jobs, cores)
data_split = np.array_split(X_copy, partitions)
pool = mp.Pool(cores)
data = pd.concat(pool.map(self._preprocess_part, data_split))
pool.close()
pool.join()
return data
def _preprocess_part(self, part):
return part.apply(self._preprocess_text)
def _preprocess_text(self, text):
normalized_text = self._normalize(text)
doc = self.nlp(normalized_text)
removed_punct = self._remove_punct(doc)
removed_stop_words = self._remove_stop_words(removed_punct)
return self._lemmatize(removed_stop_words)
def _normalize(self, text):
# some issues in normalise package
try:
return " ".join(
normalise(
text,
variety=self.variety,
user_abbrevs=self.user_abbrevs,
verbose=False,
)
)
except:
return text
def _remove_punct(self, doc):
return [t for t in doc if t.text not in string.punctuation]
def _remove_stop_words(self, doc):
return [t for t in doc if not t.is_stop]
def _lemmatize(self, doc):
return " ".join([t.lemma_ for t in doc])
def preprocess_text(save_path):
df_bbc = pd.read_csv("data/bbc-text.csv")
text = TextPreprocessor(n_jobs=-1).transform(df_bbc["text"])
df_bbc["text"] = text
df_bbc.to_csv(save_path, sep=",", index=False)
return True
| Lolik-Bolik/Hashing_Algorithms | utils/process_book.py | process_book.py | py | 2,868 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "nltk.download",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "nltk.download",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sklearn.base.BaseEstimator",
... |
7147825929 | #!/usr/bin/env python3
import re
import sys
import linecache
from pathlib import Path
regex = re.compile('#?(.*)\s?=\s?(.*)')
data = ''
try:
fpath = str(sys.argv[1])
if not Path(fpath).is_file():
raise Exception("file path is invalid or not a file")
except:
print("Error: file not provided or invalid file.")
sys.exit(1)
try:
with open(fpath, 'r') as fh:
lines = fh.readlines()
for line in lines:
if re.search('^\n', line):
data += line
elif regex.search(line):
match = regex.search(line)
comment = ''
if re.search('^#.*', line):
comment = '#'
data += (comment + match.group(1).strip() + "={{ getenv(\"" + match.group(1).replace('.', '_').upper().strip() + "\", \"" + match.group(2).strip() + "\") }}" + "\n")
elif re.search('^#.*', line):
data += line
else:
pass
with open(fpath + '.new', 'w') as fh:
fh.write(data)
except Exception as err:
exc_type, exc_obj, tb = sys.exc_info()
f = tb.tb_frame
lineno = tb.tb_lineno
filename = f.f_code.co_filename
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
print('EXCEPTION IN ({}, LINE {} "{}"): {}'.format(filename, lineno, line.strip(), exc_obj))
| japtain-cack/docker-marid | files/configToRemco.py | configToRemco.py | py | 1,338 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "re.compile",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 16... |
14049615322 | from django.shortcuts import render
from admin_dashboard.models import Review, Package
from django.contrib import messages
def home(request):
reviews = Review.objects.all()
packages = Package.objects.all()
return render(request, 'index.html', {
'title': 'Home',
'reviews': reviews,
'packages': packages,
'request': request,
})
def handler404(request, exception, template_name='404.html'):
response = render_to_response(template_name)
response.status_code = 404
return response
def handler500(request, exception, template_name='500.html'):
response = render_to_response(template_name)
response.status_code = 500
return response | aniatki/pro-dad | homepage/views.py | views.py | py | 709 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "admin_dashboard.models.Review.objects.all",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "admin_dashboard.models.Review.objects",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "admin_dashboard.models.Review",
"line_number": 6,
"usa... |
534546914 | import pygame as p
from Chess import ChessEngine, SmartMoveFinder, DataToLearn, VisualizData
import time
import xml.etree.ElementTree as gfg
import os.path
from Chess.DataTree import TreeNode
WIDTH = HEIGHT = 512  # pixel size of the (square) board drawing area
DIMENSION = 8  # chess board is 8x8
SQ_SIZE = HEIGHT // DIMENSION  # pixel size of a single square
MAX_FPS = 15  # frame cap of the main loop (animations run faster, see animateMove)
IMAGES = {}  # piece code ('P', 'q', ...) -> scaled pygame Surface; filled by loadImages()
WHITE_PIECE_CAPTUED = []  # captured white pieces, drawn in the left tray by animateMove
BLACK_PIECE_CAPTUED = []  # captured black pieces, drawn in the right tray by animateMove
BLACK_EVALUATION = 0.5  # Win percentage at start
# NOTE: 'test' is the win-percentage bar rect; it is mutated in place
# (test.update(...)) by main() after every move and drawn by drawBoard().
test = p.Rect(200 + HEIGHT * (1 - BLACK_EVALUATION), HEIGHT + 20 + 60,
              WIDTH * BLACK_EVALUATION, 50)  # Show win percentage
# Load all of the piece images exactly once at startup.
def loadImages():
    """Load every piece image once, scale it to the square size and cache it in IMAGES."""
    pieces = ['P', 'R', 'N', 'B', 'K', 'Q', 'p', 'r', 'n', 'b', 'k', 'q']
    pics_name = ['wP', 'wR', 'wN', 'wB', 'wK', 'wQ', 'p', 'r', 'n', 'b', 'k', 'q']
    # Walk both lists in lockstep: piece code -> image file base name.
    for piece, picture in zip(pieces, pics_name):
        raw_image = p.image.load("allData/images/" + picture + ".png")
        IMAGES[piece] = p.transform.scale(raw_image, (SQ_SIZE, SQ_SIZE))
def main():
    """Program entry point.

    Loads (or seeds) the persisted learning tree from Tree_history_next.xml,
    initialises pygame, then runs the event/draw loop until the game ends or
    the window is closed.
    """
    global MOVE_TIME_START
    global TREE
    global CURRENT_NODE
    p.init()
    # Load the move history; seed the XML file with a bare root position when
    # it is missing or empty (os.stat on a missing file would raise).
    if not os.path.exists('Tree_history_next.xml') or os.stat('Tree_history_next.xml').st_size == 0:
        start = "<Level-0><State><FEN>rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR</FEN><ImportedEvaluation>0</ImportedEvaluation><Evaluation>0</Evaluation><Wins>0</Wins><Plays>0</Plays><Move /></State></Level-0>"
        # Fix: the file used to be opened in binary mode ("wb") and handed a
        # str, which raises TypeError -- write it in text mode instead.
        with open("Tree_history_next.xml", "w") as seed_file:
            seed_file.write(start)
    MOVE_TIME_START = time.time()
    tree = gfg.parse('Tree_history_next.xml').getroot()
    # Element.getchildren() was removed in Python 3.9 -- index the element directly.
    root_state = tree[0]
    TREE = TreeNode([root_state[0].text, root_state[1].text,
                     root_state[2].text, root_state[3].text,
                     root_state[4].text], "")
    if len(root_state) > 6:
        TREE.read_tree(root_state[6])
    print("Time to load: ", time.time() - MOVE_TIME_START)
    CURRENT_NODE = TREE
    VisualizData.visualize_data_print(CURRENT_NODE)
    loadImages()
    moveSound = p.mixer.Sound('allData/sound/moveSound.wav')
    conquerePieceSound = p.mixer.Sound('allData/sound/conquerePieceSound.mp3')
    screen = p.display.set_mode((WIDTH + 400, HEIGHT + 80 + 60))
    p.display.set_caption("K(ing) AI")
    clock = p.time.Clock()
    screen.fill(p.Color("beige"))
    gs = ChessEngine.GameState()
    gs.moveLogLong.append(CURRENT_NODE)
    validMoves = gs.getValidMoves()
    moveMade = False  # flag: a valid move was made this frame
    animate = False  # flag: the last move should be animated
    running = True
    sqSelected = ()  # last square the user clicked: (row, col) or ()
    playerClicks = []  # up to two clicks: [(fromRow, fromCol), (toRow, toCol)]
    gameOver = False
    playerOne = False  # If human is playing white = true
    playerTwo = False  # If human is playing black = true
    while running:
        humanTurn = (gs.whiteToMove and playerOne) or (not gs.whiteToMove and playerTwo)
        for e in p.event.get():
            if e.type == p.QUIT:
                running = False
            # Mouse handler
            elif e.type == p.MOUSEBUTTONDOWN:
                if not gameOver and humanTurn:
                    location = p.mouse.get_pos()  # (x,y) of mouse
                    col = (location[0] - 200) // SQ_SIZE
                    row = (location[1] - 10 - 60) // SQ_SIZE
                    # Only react to clicks inside the board area.
                    if 200 < location[0] < WIDTH + 200 and 10 + 60 < location[1] < HEIGHT + 10 + 60:
                        if sqSelected == (row, col):  # clicking the same square twice deselects
                            sqSelected = ()
                            playerClicks = []
                        else:
                            sqSelected = (row, col)
                            playerClicks.append(sqSelected)
                        if len(playerClicks) == 2:
                            move = ChessEngine.Move(playerClicks[0], playerClicks[1], gs.board)
                            for candidate in validMoves:
                                if move == candidate:
                                    # Make sure the tree knows the successors of this position.
                                    if len(CURRENT_NODE.children) < 1:
                                        timer = time.time()
                                        SmartMoveFinder.addToTree(gs, CURRENT_NODE)
                                        print("time to add: ", time.time() - timer)
                                    gs.makeMove(candidate)
                                    CURRENT_NODE = changeCurrentNode(gs)
                                    gs.moveLogLong.append(CURRENT_NODE)
                                    moveMade = True
                                    animate = True
                                    print("Player-time: ", time.time() - MOVE_TIME_START)
                                    MOVE_TIME_START = time.time()
                                    if move.pieceCaptured != "--":
                                        conquerePieceSound.play()
                                    else:
                                        moveSound.play()
                                    # recalculate the win percentage (local name;
                                    # the module-level BLACK_EVALUATION is untouched)
                                    BLACK_EVALUATION = 0.5 - float(CURRENT_NODE.evaluation) / 1000
                                    if BLACK_EVALUATION > 0.99:
                                        BLACK_EVALUATION = 0.99
                                    test.update(200 + HEIGHT * (1 - BLACK_EVALUATION), HEIGHT + 20 + 60,
                                                WIDTH * BLACK_EVALUATION, 50)
                                    sqSelected = ()
                                    playerClicks = []
                            if not moveMade:
                                playerClicks = [sqSelected]
            # key handlers
            elif e.type == p.KEYDOWN:
                if e.key == p.K_z:  # undo if "z" is pressed
                    gs.undoMove()
                    moveMade = True
                    animate = False
                    gameOver = False
                if e.key == p.K_r:  # reset the board when pressing "r"
                    gs = ChessEngine.GameState()
                    validMoves = gs.getValidMoves()
                    sqSelected = ()
                    playerClicks = []
                    moveMade = False
                    animate = False
                    gameOver = False
                if e.key == p.K_1:  # save the learning tree to disk
                    MOVE_TIME_START = time.time()
                    TREE.save_tree("Tree_history_next.xml")
                    print("Time to save: ", time.time() - MOVE_TIME_START)
                if e.key == p.K_2:  # declare checkmate manually
                    MOVE_TIME_START = time.time()
                    gs.checkMate = True
                    gameOver = True
                    print("Time to update the data: ", time.time() - MOVE_TIME_START)
                if e.key == p.K_3:  # declare stalemate manually
                    gs.staleMate = True
                    gameOver = True
                # white lost (from surrender)
                if e.key == p.K_4:
                    if gs.whiteToMove:
                        gs.checkMate = True
                        gameOver = True
                    else:
                        gs.whiteToMove = True
                        gs.checkMate = True
                        gameOver = True
                # black lost (from surrender)
                if e.key == p.K_5:
                    if not gs.whiteToMove:
                        gs.checkMate = True
                        gameOver = True
                    else:
                        gs.whiteToMove = False
                        gs.checkMate = True
                        gameOver = True
        # AI Movefinder
        if not gameOver and not humanTurn:
            if len(CURRENT_NODE.children) < 1:
                timer = time.time()
                SmartMoveFinder.addToTree(gs, CURRENT_NODE)
                print("time to add: ", time.time() - timer)
            search_start = time.time()
            AImove = SmartMoveFinder.findBestMoveMinMax(gs, validMoves, CURRENT_NODE)
            print("find new Move: ", time.time() - search_start)
            if AImove is None:  # fall back to a random legal move
                AImove = SmartMoveFinder.findRandomMove(validMoves)
            gs.makeMove(AImove)
            moveMade = True
            animate = True
            CURRENT_NODE = changeCurrentNode(gs)
            gs.moveLogLong.append(CURRENT_NODE)
            print("AI: ", time.time() - MOVE_TIME_START)
            MOVE_TIME_START = time.time()
            if AImove.pieceCaptured != "--":
                conquerePieceSound.play()
            else:
                moveSound.play()
            # recalculate the win percentage
            BLACK_EVALUATION = 0.5 - float(CURRENT_NODE.evaluation) / 1000
            if BLACK_EVALUATION > 0.99:
                BLACK_EVALUATION = 0.99
            test.update(200 + HEIGHT * (1 - BLACK_EVALUATION), HEIGHT + 20 + 60,
                        WIDTH * BLACK_EVALUATION, 50)
        if moveMade:
            if animate:
                animateMove(gs.moveLog[-1], screen, gs.board, clock)
            validMoves = gs.getValidMoves()
            moveMade = False
            animate = False
        drawGameState(screen, gs, validMoves, sqSelected)
        if gs.checkMate:
            gameOver = True
            # Side to move is the side that got mated.
            if gs.whiteToMove:
                font1 = p.font.SysFont('Black wins by checkmate', 64)
                img1 = font1.render('Black wins by checkmate', True, "dark red")
                screen.blit(img1, (210, 280))
            else:
                font1 = p.font.SysFont('White wins by checkmate', 64)
                img1 = font1.render('White wins by checkmate', True, "dark red")
                screen.blit(img1, (210, 280))
            updatePlayAndWins(gs)
            MOVE_TIME_START = time.time()
            TREE.save_tree("Tree_history_next.xml")
            print("Time to save: ", time.time() - MOVE_TIME_START)
            running = False
            time.sleep(60)  # keep the result on screen before exiting
        elif gs.staleMate:
            font1 = p.font.SysFont('Stalemate', 64)
            img1 = font1.render('Stalemate', True, "dark red")
            screen.blit(img1, (210, 280))
            updatePlayAndWins(gs)
            MOVE_TIME_START = time.time()
            TREE.save_tree("Tree_history_next.xml")
            print("Time to save: ", time.time() - MOVE_TIME_START)
            running = False
            time.sleep(60)  # keep the result on screen before exiting
        clock.tick(MAX_FPS)
        p.display.flip()
# Highlight the selected square and its legal destination squares.
def highlightSqaures(screen, gs, validMoves, sqSelected):
    """Tint the selected square blue and every legal target square yellow."""
    if sqSelected == ():
        return
    row, col = sqSelected
    piece_is_black = gs.board[row][col].islower()
    # Only highlight when the selection belongs to the side to move.
    if piece_is_black == gs.whiteToMove:
        return
    overlay = p.Surface((SQ_SIZE, SQ_SIZE))
    overlay.set_alpha(100)  # semi-transparent
    overlay.fill(p.Color('blue'))
    screen.blit(overlay, ((col * SQ_SIZE) + 200, (row * SQ_SIZE) + 10 + 60))
    # Reuse the same surface for the destination squares.
    overlay.fill(p.Color('yellow'))
    for move in validMoves:
        if move.startRow == row and move.startCol == col:
            screen.blit(overlay, ((SQ_SIZE * move.endCol) + 200, (move.endRow * SQ_SIZE) + 10 + 60))
def drawGameState(screen, gs, validMoves, sqSelected):
    """Render one frame in layer order: board, selection highlights,
    last-move highlight, then the pieces on top."""
    drawBoard(screen)  # draw squares on the board
    highlightSqaures(screen, gs, validMoves, sqSelected)
    highlightLastMove(gs, screen)
    drawPieces(screen, gs.board)  # draw pieces on the top of those squares
def drawBoard(screen):
    """Draw the board frame, the top/bottom bars and the 8x8 checker pattern."""
    global colors
    # Black border around the playing area.
    p.draw.rect(screen, "black", p.Rect(194, 64, HEIGHT + 12, WIDTH + 12))
    # Top bar: white and grey halves.
    p.draw.rect(screen, "white", p.Rect(200 + WIDTH / 4, 10, WIDTH / 4, 40))
    p.draw.rect(screen, "grey", p.Rect(200 + WIDTH / 2, 10, WIDTH / 4, 40))
    # Bottom bar: white background plus the grey win-percentage rect.
    p.draw.rect(screen, "white", p.Rect(200, HEIGHT + 20 + 60, WIDTH, 50))
    p.draw.rect(screen, "grey", test)
    colors = [p.Color("white"), p.Color("grey")]
    for rank in range(DIMENSION):
        for file in range(DIMENSION):
            square_color = colors[(rank + file) % 2]
            p.draw.rect(screen, square_color,
                        p.Rect(file * SQ_SIZE + 200, rank * SQ_SIZE + 10 + 60, SQ_SIZE, SQ_SIZE))
def drawPieces(screen, board):
    """Blit every piece image onto its square; '--' marks an empty square."""
    for rank, rank_pieces in enumerate(board):
        for file, piece in enumerate(rank_pieces):
            if piece != "--":
                screen.blit(IMAGES[piece],
                            p.Rect(file * SQ_SIZE + 200, rank * SQ_SIZE + 10 + 60, SQ_SIZE, SQ_SIZE))
def animateMove(move, screen, board, clock):
    """Animate *move* by sliding the piece frame-by-frame toward its target,
    then blit any captured piece into the side capture trays.

    Mutates the module-level WHITE_PIECE_CAPTUED / BLACK_PIECE_CAPTUED lists.
    """
    global colors
    dR = move.endRow - move.startRow
    dC = move.endCol - move.startCol
    framesPerSquare = 4  # frames to move one square
    frameCount = (abs(dR) + abs(dC)) * framesPerSquare
    for frame in range(frameCount + 1):
        # Interpolate the moving piece's (row, col) position for this frame.
        r, c = (move.startRow + dR * frame / frameCount, move.startCol + dC * frame / frameCount)
        drawBoard(screen)
        drawPieces(screen, board)
        # erase the piece moved from its ending square
        color = colors[(move.endRow + move.endCol) % 2]
        endSquare = p.Rect((move.endCol * SQ_SIZE) + 200, (move.endRow * SQ_SIZE) + 10 + 60, SQ_SIZE, SQ_SIZE)
        p.draw.rect(screen, color, endSquare)
        # draw captured piece onto rectangle
        if move.pieceCaptured != '--':
            screen.blit(IMAGES[move.pieceCaptured], endSquare)
        # draw moving piece
        screen.blit(IMAGES[move.pieceMoved], p.Rect((c * SQ_SIZE) + 200, (r * SQ_SIZE) + 10 + 60, SQ_SIZE, SQ_SIZE))
        p.display.flip()
        clock.tick(60)  # animation runs at 60 fps regardless of MAX_FPS
    # Uppercase piece codes are white (see loadImages): stack them in the
    # left capture tray, three per row.
    if move.pieceCaptured.isupper():
        screen.blit(IMAGES[move.pieceCaptured],
                    p.Rect(((len(WHITE_PIECE_CAPTUED) % 3)) * SQ_SIZE,
                           (len(WHITE_PIECE_CAPTUED) // 3) * SQ_SIZE + 10 + 60, SQ_SIZE, SQ_SIZE))
        WHITE_PIECE_CAPTUED.append(move.pieceCaptured)
    # Lowercase piece codes are black: stack them in the right capture tray.
    elif move.pieceCaptured.islower():
        screen.blit(IMAGES[move.pieceCaptured],
                    p.Rect(((len(BLACK_PIECE_CAPTUED) % 3)) * SQ_SIZE + WIDTH + 210,
                           (len(BLACK_PIECE_CAPTUED) // 3) * SQ_SIZE + 10 + 60, SQ_SIZE, SQ_SIZE))
        BLACK_PIECE_CAPTUED.append(move.pieceCaptured)
# highlight last move made
def highlightLastMove(gs, screen):
    """Shade the start and end squares of the most recent move in translucent red."""
    if not gs.moveLog:
        return
    last = gs.moveLog[-1]
    overlay = p.Surface((SQ_SIZE, SQ_SIZE))
    overlay.set_alpha(100)  # transparency value
    overlay.fill(p.Color('red'))
    screen.blit(overlay, ((last.startCol * SQ_SIZE) + 200, (last.startRow * SQ_SIZE) + 10 + 60))
    screen.blit(overlay, ((last.endCol * SQ_SIZE) + 200, (last.endRow * SQ_SIZE) + 10 + 60))
def drawText(screen, text):
    """Render *text* centred on the board: grey text with a black copy offset by (2, 2)."""
    font = p.font.SysFont("Helvitca", 32, True, False)
    grey_surface = font.render(text, 0, p.Color('Gray'))
    location = p.Rect(0, 0, WIDTH, HEIGHT).move(WIDTH / 2 - grey_surface.get_width() / 2,
                                                HEIGHT / 2 - grey_surface.get_height() / 2)
    screen.blit(grey_surface, location)
    black_surface = font.render(text, 0, p.Color('Black'))
    screen.blit(black_surface, location.move(2, 2))
# change the current Node
def changeCurrentNode(gs):
    """Return the child of CURRENT_NODE whose FEN matches the current game state.

    Implicitly returns None when no child matches.
    """
    fen_now = DataToLearn.createFEN(gs)
    for child in CURRENT_NODE.children:
        if child.fEN == fen_now:
            return child
def updatePlayAndWins(gs):
    """Record the finished game in the persisted statistics tree (TREE).

    NOTE(review): unconditionally sets ``gs.checkMate = True`` *before* testing
    it, so every game is counted as decisive and the draw path in the helper
    can never trigger from here — confirm this is intended.
    """
    print (len(gs.moveLogLong))
    gs.checkMate = True
    # root play count (values are stored as strings, hence the int() round-trips)
    TREE.plays = 1 + int(TREE.plays)
    if gs.checkMate and not gs.whiteToMove:
        TREE.wins = int(TREE.wins) + 1
    helperUpdatePlayAndWins(gs, TREE, level=1)
def helperUpdatePlayAndWins(gs, current_node, level):
    """Walk the statistics tree along the game's move list, bumping the play
    count of each visited node and its win count on a win (1) or draw (0.5).

    Recurses one level per move in ``gs.moveLogLong``.
    """
    print(level)
    for state in current_node.children:
        if state == gs.moveLogLong[level]:
            state.plays = float(state.plays) + 1
            # descend into the matched child for the next recursion level
            current_node = state
            if gs.checkMate and not gs.whiteToMove:
                state.wins = float(state.wins) + 1
            elif gs.staleMate:
                state.wins = float(state.wins) + 0.5
    if len(gs.moveLogLong) - 1 > level:
        helperUpdatePlayAndWins(gs, current_node, level + 1)
# Script entry point.
if __name__ == "__main__":
    main()

# todo: Add more evaluations (pieces covered)
# todo: try to get evaluation from the imported data
# todo: Add Player Specific Bot
# todo: make heuristic: which is the first move to watch (makes it faster)
| KaiBaeuerle/chessAI | Chess/ChessMain.py | ChessMain.py | py | 16,775 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.Rect",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pygame.transform.scale",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "pygame.image... |
33865589189 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
class Migration(migrations.Migration):
    """Create the ``Chat`` model: a timestamped conversation log tied to a Bot."""

    dependencies = [
        ('bot', '0007_bot_logo'),
    ]

    operations = [
        migrations.CreateModel(
            name='Chat',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # NOTE(review): this default is a *fixed* timestamp captured when
                # the migration was generated — presumably the model meant
                # default=datetime.datetime.now (uncalled). Harmless in a
                # historical migration, but verify the model definition.
                ('date', models.DateTimeField(default=datetime.datetime(2016, 5, 15, 20, 22, 16, 862988))),
                ('log', models.TextField()),
                ('bot', models.ForeignKey(to='bot.Bot')),
            ],
        ),
    ]
| DenerRodrigues/Chatterbot | bot/migrations/0008_chat.py | 0008_chat.py | py | 681 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.CreateModel",
"line_number": 15,
"usage_type": "call"
},
... |
40568030755 | from __future__ import annotations
import random
from datetime import timedelta
from typing import Type
from game.theater import FrontLine
from game.utils import Distance, Speed, feet
from .capbuilder import CapBuilder
from .invalidobjectivelocation import InvalidObjectiveLocation
from .patrolling import PatrollingFlightPlan, PatrollingLayout
from .waypointbuilder import WaypointBuilder
class BarCapFlightPlan(PatrollingFlightPlan[PatrollingLayout]):
    """Flight plan for a barrier CAP: a racetrack patrol near an objective."""

    @staticmethod
    def builder_type() -> Type[Builder]:
        return Builder

    @property
    def patrol_duration(self) -> timedelta:
        """Time on station, taken from coalition doctrine."""
        return self.flight.coalition.doctrine.cap_duration

    @property
    def patrol_speed(self) -> Speed:
        """Airframe's preferred patrol speed at the racetrack start altitude."""
        altitude = self.layout.patrol_start.alt
        return self.flight.unit_type.preferred_patrol_speed(altitude)

    @property
    def engagement_distance(self) -> Distance:
        """Doctrine range at which the CAP commits to engage."""
        return self.flight.coalition.doctrine.cap_engagement_range
class Builder(CapBuilder[BarCapFlightPlan, PatrollingLayout]):
    """Builds the waypoint layout for a BARCAP flight."""

    def layout(self) -> PatrollingLayout:
        location = self.package.target

        # BARCAPs defend fixed objectives; a front line is not a valid target.
        if isinstance(location, FrontLine):
            raise InvalidObjectiveLocation(self.flight.flight_type, location)

        start_pos, end_pos = self.cap_racetrack_for_objective(location, barcap=True)

        preferred_alt = self.flight.unit_type.preferred_patrol_altitude
        # Jitter the altitude by -2000..+1000 ft (in whole thousands) so stacked
        # flights don't share a block, then clamp to doctrine limits.
        randomized_alt = preferred_alt + feet(random.randint(-2, 1) * 1000)
        patrol_alt = max(
            self.doctrine.min_patrol_altitude,
            min(self.doctrine.max_patrol_altitude, randomized_alt),
        )

        builder = WaypointBuilder(self.flight, self.coalition)
        start, end = builder.race_track(start_pos, end_pos, patrol_alt)
        return PatrollingLayout(
            departure=builder.takeoff(self.flight.departure),
            nav_to=builder.nav_path(
                self.flight.departure.position, start.position, patrol_alt
            ),
            nav_from=builder.nav_path(
                end.position, self.flight.arrival.position, patrol_alt
            ),
            patrol_start=start,
            patrol_end=end,
            arrival=builder.land(self.flight.arrival),
            divert=builder.divert(self.flight.divert),
            bullseye=builder.bullseye(),
        )

    def build(self, dump_debug_info: bool = False) -> BarCapFlightPlan:
        """Assemble the flight plan from the computed layout."""
        return BarCapFlightPlan(self.flight, self.layout())
| dcs-liberation/dcs_liberation | game/ato/flightplans/barcap.py | barcap.py | py | 2,444 | python | en | code | 647 | github-code | 36 | [
{
"api_name": "patrolling.PatrollingFlightPlan",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "patrolling.PatrollingLayout",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "typing.Type",
"line_number": 17,
"usage_type": "name"
},
{
"api_name"... |
7704128693 | # -*- coding:utf-8 -*-
"""
Evluate the performance of embedding via different methods.
"""
import math
import numpy as np
from sklearn import metrics
from sklearn import utils as sktools
from sklearn.cluster import AgglomerativeClustering
from sklearn.cluster import SpectralClustering
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, classification_report, balanced_accuracy_score, f1_score, precision_score, \
recall_score
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
def cluster_evaluate(embeddings, labels, n_class, metric="euclidean"):
    """
    Unsupervised evaluation: cluster the embeddings with single-linkage
    agglomerative clustering and score the clustering against the ground-truth
    structural roles via:
        (1) homogeneity — conditional entropy of the true roles given clusters;
        (2) completeness — how well same-role nodes land in the same cluster;
        (3) silhouette — intra- vs. inter-cluster distance.

    Returns (homogeneity, completeness, v_measure, silhouette).
    """
    clusters = AgglomerativeClustering(n_clusters=n_class, linkage='single', affinity=metric).fit_predict(embeddings)
    h, c, v = metrics.homogeneity_completeness_v_measure(labels, clusters)
    s = metrics.silhouette_score(embeddings, clusters)
    # NOTE(review): accuracy/F1 compare raw cluster ids against the labels with
    # no label alignment (e.g. Hungarian matching), so these two printed values
    # are only meaningful if cluster ids happen to coincide with label ids.
    acc = accuracy_score(labels, clusters)
    macro_f1 = f1_score(labels, clusters, average="macro")
    print("cluster:", clusters, "labels:", labels)
    print("accuracy: ", acc)
    print("macro_score: ", macro_f1)
    print("homogeneity: ", h)
    print("completeness: ", c)
    print("v-score: ", v)
    print("silhouette: ", s)
    return h, c, v, s
def LR_evaluate(data, labels, cv=5):
    """Cross-validated one-vs-rest logistic regression on the embeddings.

    Args:
        data: feature matrix (node embeddings).
        labels: ground-truth class labels.
        cv: number of cross-validation folds.

    Returns:
        Mean accuracy over the folds.
    """
    shuffled_data, shuffled_labels = sktools.shuffle(data, labels)
    classifier = LogisticRegression(solver="lbfgs", penalty='l2', max_iter=1000, multi_class='ovr')
    fold_scores = cross_val_score(classifier, shuffled_data, y=shuffled_labels, cv=cv)
    print(f"LR: tests scores={fold_scores}, mean_score={np.mean(fold_scores)}\n")
    return np.mean(fold_scores)
def KNN_evaluate(data, labels, metric="minkowski", cv=5, n_neighbor=10):
    """Cross-validated KNN classification on node similarities/embeddings.

    Intended to be run *before* embedding, to check that the hierarchical
    similarity itself already separates the classes.
    """
    shuffled_data, shuffled_labels = sktools.shuffle(data, labels)
    classifier = KNeighborsClassifier(weights='uniform', algorithm="auto", n_neighbors=n_neighbor, metric=metric, p=2)
    fold_scores = cross_val_score(classifier, shuffled_data, y=shuffled_labels, cv=cv, scoring="accuracy")
    print(f"KNN: tests scores:{fold_scores}, mean_score={np.mean(fold_scores)}\n")
    return np.mean(fold_scores)
def evalute_results(labels: list, preds: list):
    """Compute a bundle of classification metrics plus a text report.

    Returns:
        Dict with accuracy, balanced accuracy, micro precision/recall,
        macro/micro F1 and the sklearn classification report (7 digits).
    """
    res = {
        "accuracy": accuracy_score(labels, preds),
        "balanced accuracy": balanced_accuracy_score(labels, preds),
        "micro precision": precision_score(labels, preds, average="micro"),
        "micro recall": recall_score(labels, preds, average="micro"),
        "macro f1": f1_score(labels, preds, average="macro"),
        "micro f1": f1_score(labels, preds, average="micro"),
        "report": classification_report(labels, preds, digits=7),
    }
    print(res)
    return res
def spectral_cluster_evaluate(data, labels, n_cluster, affinity="rbf"):
    """
    Run spectral clustering on ``data`` and print clustering-quality scores.

    :param data: similarity/distance matrix (affinity="precomputed") or
                 embedding vectors (affinity="rbf"), one row per node.
    :param labels: ground-truth labels aligned with the rows of ``data``.
    :param n_cluster: number of clusters to form.
    :param affinity: "precomputed" or "rbf".
    """
    metric = "euclidean"
    if affinity == "precomputed":
        # Per sklearn's guidance, when ``data`` is a *distance* matrix rather
        # than a similarity matrix, convert it with an RBF kernel first.
        distance_mat = data
        delta = math.sqrt(2)
        data = np.exp(-distance_mat ** 2 / (2. * delta ** 2))
        metric = affinity
        # NOTE(review): silhouette with metric="precomputed" expects a
        # *distance* matrix, but ``data`` is now a similarity matrix — verify.
    clustering = SpectralClustering(n_clusters=n_cluster, affinity=affinity, n_init=50, random_state=42)
    preds = clustering.fit_predict(data)
    h, c, v = metrics.homogeneity_completeness_v_measure(labels, preds)
    # BUG FIX: the original referenced an undefined name ``embeddings`` here,
    # which raised NameError on every call; score on ``data`` instead.
    s1 = metrics.silhouette_score(data, labels, metric=metric)
    s2 = metrics.silhouette_score(data, preds, metric=metric)
    print(f"homogenetiy: {h}, completeness: {c}, v_measure: {v}, silhouette_score label: {s1}, silhouette_score pred: {s2}\n")
| Sngunfei/HSD | tools/evaluate.py | evaluate.py | py | 5,117 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "sklearn.cluster.AgglomerativeClustering",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.homogeneity_completeness_v_measure",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics",
"line_number": 35,
"usage_ty... |
21393775413 | """
5 element API Client
"""
from typing import Optional, Tuple
from bgd.constants import FIFTHELEMENT
from bgd.responses import GameSearchResult, Price
from bgd.services.abc import GameSearchResultFactory
from bgd.services.api_clients import GameSearcher, JsonHttpApiClient
from bgd.services.base import CurrencyExchangeRateService, GameSearchService
from bgd.services.constants import GET
from bgd.services.responses import APIResponse
class FifthElementApiClient(JsonHttpApiClient):
    """HTTP client for the multisearch API backing 5element.by."""

    BASE_SEARCH_URL = "https://api.multisearch.io"

    async def search(self, query: str, options: Optional[dict] = None) -> APIResponse:
        """Search the catalogue; ``options`` must carry the site's search_app_id."""
        app_id = options["search_app_id"]  # type: ignore
        path = f"?query={query}&id={app_id}&lang=ru&autocomplete=true"
        return await self.connect(GET, self.BASE_SEARCH_URL, path)
class FifthElementSearchService(GameSearchService):
    """Search service for 5element.by"""

    def __init__(
        self,
        client: GameSearcher,
        result_factory: GameSearchResultFactory,
        currency_exchange_rate_converter: CurrencyExchangeRateService,
        game_category_id: str,
        search_app_id: str,
    ) -> None:
        """Init 5th element Search Service.

        ``game_category_id`` is a comma-separated list of category ids that
        count as board games; ``search_app_id`` identifies the site to the
        multisearch API.
        """
        # there is more than one category that we should check
        self._game_category_ids = game_category_id.split(",")
        super().__init__(client, result_factory, currency_exchange_rate_converter)
        self._search_app_id = search_app_id

    async def do_search(self, query: str, *args, **kwargs) -> Tuple[GameSearchResult]:
        # Run the query, then keep only in-stock products in the game categories.
        response = await self._client.search(query, {"search_app_id": self._search_app_id})
        products = self.filter_results(
            response.response["results"]["items"], self._is_available_game
        )
        return self.build_results(products)

    def _is_available_game(self, product: dict) -> bool:
        """True if it's an available board game."""
        return (
            product["is_presence"]
            and product["params_data"]["category_id"] in self._game_category_ids
        )
class FifthElementGameSearchResultFactory:
    """Builds ``GameSearchResult`` objects from raw 5element search hits."""

    BASE_URL = "https://5element.by"

    def create(self, search_result: dict) -> GameSearchResult:
        """Map one raw search hit onto a ``GameSearchResult``."""
        return GameSearchResult(
            description="",
            images=self._extract_images(search_result),
            location=None,
            owner=None,
            prices=[self._extract_price(search_result)],
            source=FIFTHELEMENT,
            subject=search_result["name"],
            url=self._extract_url(search_result),
        )

    @staticmethod
    def _extract_images(product: dict) -> list[str]:
        """The API exposes a single picture per product."""
        return [product["picture"]]

    @staticmethod
    def _extract_price(product: dict) -> Optional[Price]:
        """Price scaled by 100 — presumably minor currency units; confirm Price's contract."""
        return Price(amount=product["price"] * 100)

    def _extract_url(self, product: dict) -> str:
        """Absolute product-page URL."""
        return f"{self.BASE_URL}{product['url']}"
| ar0ne/bg_deal | bgd/services/apis/fifth_element.py | fifth_element.py | py | 3,213 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "bgd.services.api_clients.JsonHttpApiClient",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "bgd.services.constants.GET",
"line_number": 24,
"usage_type": "argument"
},
... |
"""Scrape every <img> from a local page and save the images under res/img/."""
from bs4 import BeautifulSoup
import requests  # request img from web; FIX: duplicate import removed
import shutil  # save img locally

URL1 = 'http://localhost'
page = requests.get(URL1)

soup = BeautifulSoup(page.content, 'html.parser')
print(soup.prettify())

image_tags = soup.find_all('img')
for image_tag in image_tags:
    url = image_tag['src']
    # NOTE(review): assumes src attributes are absolute URLs; relative paths
    # would need urllib.parse.urljoin(URL1, url) — confirm against the page.
    res = requests.get(url, stream=True)
    # keep only the basename for the local file name
    url = url[url.rfind("/")+1:]
    print(url)
    # BUG FIX: the original path was "res/img/ " + url — the stray space became
    # part of every file name.
    with open("res/img/" + url, 'wb') as f:
        shutil.copyfileobj(res.raw, f)
    print('Image sucessfully Downloaded: ', url)
| FukudaYoshiro/singo | saudi/import image.py | import image.py | py | 555 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "shutil.copyfileobj",
"l... |
# Enter your code here. Read input from STDIN. Print output to STDOUT
from collections import deque

dq = deque()
# dispatch table instead of an if/elif chain; unknown commands are ignored,
# matching the original behaviour
operations = {
    'append': dq.append,
    'appendleft': dq.appendleft,
    'pop': dq.pop,
    'popleft': dq.popleft,
}
for _ in range(int(input())):
    tokens = input().split()
    handler = operations.get(tokens[0])
    if handler is not None:
        handler(*tokens[1:])
print(*dq)

'''EVAL is not the answer to everything.
AND it's dangerous'''
{
"api_name": "collections.deque",
"line_number": 4,
"usage_type": "call"
}
] |
7136369222 | # -*- coding: utf-8 -*-
# ***************************************************
# * File : timefeatures.py
# * Author : Zhefeng Wang
# * Email : wangzhefengr@163.com
# * Date : 2023-04-19
# * Version : 0.1.041901
# * Description : description
# * Link : link
# * Requirement : 相关模块版本需求(例如: numpy >= 2.1.0)
# ***************************************************
# python libraries
import os
import sys
ROOT = os.getcwd()
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT))
from typing import List
import numpy as np
import pandas as pd
from pandas.tseries import offsets
from pandas.tseries.frequencies import to_offset
from sklearn.preprocessing import OneHotEncoder
from utils_func import is_weekend
# global variable
LOGGING_LABEL = __file__.split('/')[-1][:-3]
def feature_engineer(df):
    """Append calendar columns (month, day, weekday, hour, minute) extracted
    from the datetime-typed ``DATATIME`` column.

    Args:
        df: DataFrame with a ``DATATIME`` column of pandas Timestamps.

    Returns:
        The same DataFrame with the five calendar columns appended in place.
    """
    # FIX: the original passed a positional `1` to Series.apply, which landed in
    # the deprecated `convert_dtype` parameter (removed in pandas 3.0). The
    # default behaviour is identical, so the argument is dropped.
    df["month"] = df.DATATIME.apply(lambda ts: ts.month)
    df["day"] = df.DATATIME.apply(lambda ts: ts.day)
    df["weekday"] = df.DATATIME.apply(lambda ts: ts.weekday())
    df["hour"] = df.DATATIME.apply(lambda ts: ts.hour)
    df["minute"] = df.DATATIME.apply(lambda ts: ts.minute)
    return df
def time_static_features(series,
                         datetime_format: str = '%Y-%m-%d %H:%M:%S',
                         datetime_is_index: bool = False,
                         datetime_name: str = None,
                         target_name: str = None,
                         features: List = []) -> pd.DataFrame:
    """
    Extract static calendar features from a timestamp column or index.

    Args:
        series: input DataFrame.
        datetime_format: strptime format used to parse the timestamps.
        datetime_is_index: True when the timestamp is the frame's index.
        datetime_name: timestamp column name when it is not the index.
        target_name: unused; kept for interface compatibility.
        features: feature column names to return; [] means return everything.
            (The mutable default is safe here: the list is only read.)

    Returns:
        DataFrame with the extracted calendar feature columns.
    """
    data = series.copy()
    # Normalise the timestamp into a temporary working column "DT".
    if datetime_is_index:
        data["DT"] = data.index
        data["DT"] = pd.to_datetime(data["DT"], format = datetime_format)
    else:
        data[datetime_name] = pd.to_datetime(data[datetime_name], format = datetime_format)
        data["DT"] = data[datetime_name]
    # Calendar / clock features.
    # BUG FIX: Timestamp.date / Timestamp.time are *methods*; the original
    # `lambda x: x.date` stored bound-method objects, not values. Call them.
    data["date"] = data["DT"].apply(lambda x: x.date())  # calendar date
    data["time"] = data["DT"].apply(lambda x: x.time())  # time of day
    data["year"] = data["DT"].apply(lambda x: x.year)  # year
    data["is_year_start"] = data["DT"].apply(lambda x: x.is_year_start)  # first day of year?
    data["is_year_end"] = data["DT"].apply(lambda x: x.is_year_end)  # last day of year?
    data["is_leap_year"] = data["DT"].apply(lambda x: x.is_leap_year)  # leap year?
    data["quarter"] = data["DT"].apply(lambda x: x.quarter)  # quarter
    data["is_quarter_start"] = data["DT"].apply(lambda x: x.is_quarter_start)  # first day of quarter?
    data["is_quarter_end"] = data["DT"].apply(lambda x: x.is_quarter_end)  # last day of quarter?
    # TODO season
    # TODO fiscal quarter
    data["month"] = data["DT"].apply(lambda x: x.month)  # month
    data["is_month_start"] = data["DT"].apply(lambda x: x.is_month_start)  # first day of month?
    data["is_month_end"] = data["DT"].apply(lambda x: x.is_month_end)  # last day of month?
    data["daysinmonth"] = data["DT"].apply(lambda x: x.daysinmonth)  # number of days in the month
    # TODO working days in the month
    # TODO holidays in the month
    # TODO daylight-saving flag
    data["weekofyear"] = data["DT"].apply(lambda x: x.isocalendar().week)  # ISO week of year
    # TODO week of month
    data["dayofyear"] = data["DT"].apply(lambda x: x.dayofyear)  # day of year
    data["dayofmonth"] = data["DT"].apply(lambda x: x.day)  # day of month
    data["dayofweek"] = data["DT"].apply(lambda x: x.dayofweek)  # day of week (Mon=0)
    data["is_weekend"] = data['dayofweek'].apply(is_weekend)  # weekend flag (project helper)
    # TODO is_holiday / working-day flag
    # TODO consecutive holiday length; n-th day before/of/after a holiday
    # TODO compensated-workday ("tiaoxiu") flag
    data["hour"] = data["DT"].apply(lambda x: x.hour)  # hour of day
    data["minute"] = data["DT"].apply(lambda x: x.minute)  # minute
    # TODO minutes elapsed since midnight
    data["second"] = data["DT"].apply(lambda x: x.second)  # second
    data["microsecond"] = data["DT"].apply(lambda x: x.microsecond)  # microsecond
    data["nanosecond"] = data["DT"].apply(lambda x: x.nanosecond)  # nanosecond
    # TODO time-of-day bucket (morning/afternoon/...)
    data["day_high"] = data["hour"].apply(lambda x: 0 if 0 < x < 8 else 1)  # 0 for the small hours (01-07), 1 otherwise
    # TODO is_work: whether the business is open/working at this time
    del data["DT"]
    if features == []:
        selected_features = data
    else:
        selected_features = data[features]
    return selected_features
def time_dynamic_features(series,
                          n_lag: int = 1,
                          n_fut: int = 1,
                          selLag = None,
                          selFut = None,
                          dropnan = True):
    """
    Converts a time series to a supervised learning data set by adding time-shifted
    prior and future period data as input or output (i.e., target result) columns for each period.

    Params:
        series: a DataFrame (or list/array convertible to one) of periodic attributes.
        n_lag: number of PRIOR periods to lag as input (X); generates: Xa(t-1), Xa(t-2); min = 0 --> nothing lagged.
        n_fut: number of FUTURE periods to add as target output (y); generates Yout(t+1); min = 0 --> no future periods.
        selLag: only copy these specific PRIOR period attributes; default = None; EX: ['Xa', 'Xb' ].
        selFut: only copy these specific FUTURE period attributes; default = None; EX: ['rslt', 'xx'].
        dropnan: True = drop rows with NaN values; default = True.

    Returns:
        a Pandas DataFrame of time series data organized for supervised learning.

    NOTES:
        (1) The current period's data is always included in the output.
        (2) A suffix is added to the original column names to indicate a relative time reference:
            e.g. (t) is the current period; (t-2) is two periods back; (t+1) is the next period.
        (3) This is an extension of Jason Brownlee's series_to_supervised() function, customized for MFI use.
        (4) NOTE(review): the future loop below starts at i = 0, so the current
            period is emitted twice — once unsuffixed and once as "(t+0)".
    """
    data = series.copy()
    n_vars = 1 if type(data) is list else data.shape[1]
    df = pd.DataFrame(data)
    origNames = df.columns
    cols, names = list(), list()
    # include all current period attributes
    cols.append(df.shift(0))
    names += [("%s" % origNames[j]) for j in range(n_vars)]
    # ----------------------------------------------------
    # lag any past period attributes (t-n_lag, ..., t-1)
    # ----------------------------------------------------
    n_lag = max(0, n_lag)
    # input sequence (t-n, ..., t-1)
    for i in range(n_lag, 0, -1):
        suffix = "(t-%d)" % i
        if (selLag is None):
            # lag every column
            cols.append(df.shift(i))
            names += [("%s%s" % (origNames[j], suffix)) for j in range(n_vars)]
        else:
            # lag only the requested columns
            for var in (selLag):
                cols.append(df[var].shift(i))
                names += [("%s%s" % (var, suffix))]
    # ----------------------------------------------------
    # include future period attributes (t+1, ..., t+n_fut)
    # ----------------------------------------------------
    n_fut = max(0, n_fut)
    # forecast sequence (t, t+1, ..., t+n)
    for i in range(0, n_fut + 1):
        suffix = "(t+%d)" % i
        if (selFut is None):
            cols.append(df.shift(-i))
            names += [("%s%s" % (origNames[j], suffix)) for j in range(n_vars)]
        else:
            for var in (selFut):
                cols.append(df[var].shift(-i))
                names += [("%s%s" % (var, suffix))]
    # ----------------------------------------------------
    # put it all together
    # ----------------------------------------------------
    agg = pd.concat(cols, axis = 1)
    agg.columns = names
    # ----------------------------------------------------
    # drop rows with NaN values
    # ----------------------------------------------------
    if dropnan:
        agg.dropna(inplace = True)
    return agg
# TODO
def get_time_sin_cos(data: pd.DataFrame, col: str, n: int, one_hot: bool = False, drop: bool = True):
    """Encode a cyclic time column as sin/cos pairs so that period endpoints
    (e.g. hour 23 and hour 0, Sunday and Monday) end up close in feature space.

    Args:
        data: input frame.
        col: name of the column to encode.
        n: cycle length (24 for hours, 7 for weekdays, ...).
        one_hot: additionally append one-hot columns named ``{col}_0``, ...
        drop: remove the original column afterwards.

    Returns:
        The frame with the encoded columns appended.
    """
    data[col + '_sin'] = round(np.sin(2 * np.pi / n * data[col]), 6)
    data[col + '_cos'] = round(np.cos(2 * np.pi / n * data[col]), 6)
    if one_hot:
        encoder = OneHotEncoder()
        encoded = encoder.fit_transform(data[col].values.reshape(-1, 1)).toarray()
        dummy_names = [col + '_' + str(int(i)) for i in range(encoded.shape[1])]
        data = pd.concat([data, pd.DataFrame(encoded, columns = dummy_names)], axis = 1)
    if drop:
        data = data.drop(col, axis = 1)
    return data
# TODO
def gen_lag_features(data, cycle):
    """Build a supervised-learning frame of seasonally differenced lags.

    The series is first differenced at lag ``cycle`` (seasonal differencing to
    stabilise it); columns ``t-cycle`` .. ``t-1`` then hold lagged values of
    the differenced series and ``t`` holds the current value.

    Args:
        data: single-column DataFrame holding the raw series.
        cycle: seasonal period of the series.

    Returns:
        DataFrame of lag features with the first ``cycle + 1`` incomplete
        rows removed.
    """
    # seasonal difference, dropping the leading NaN rows it creates
    diffed = data.diff(cycle)
    diffed = diffed[cycle:]
    # supervised-learning lag columns, oldest lag first
    for lag in range(cycle, 0, -1):
        diffed["t-" + str(lag)] = diffed.shift(lag).values[:, 0]
    diffed["t"] = diffed.values[:, 0]
    return diffed[cycle + 1:]
class TimeFeature:
    """Base class for calendar-feature encoders over a pandas DatetimeIndex.

    Subclasses implement ``__call__`` to map the index to values in [-0.5, 0.5].
    """
    def __init__(self):
        pass

    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        # No-op in the base class; subclasses return the encoded array.
        pass

    def __repr__(self):
        return self.__class__.__name__ + "()"
class SecondOfMinute(TimeFeature):
    """
    Second of minute encoded as value between [-0.5, 0.5]
    """
    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        return index.second / 59.0 - 0.5
class MinuteOfHour(TimeFeature):
    """
    Minute of hour encoded as value between [-0.5, 0.5]
    """
    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        return index.minute / 59.0 - 0.5
class HourOfDay(TimeFeature):
    """
    Hour of day encoded as value between [-0.5, 0.5]
    """
    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        return index.hour / 23.0 - 0.5
class DayOfWeek(TimeFeature):
    """
    Day of week encoded as value between [-0.5, 0.5]
    """
    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        return index.dayofweek / 6.0 - 0.5
class DayOfMonth(TimeFeature):
    """
    Day of month encoded as value between [-0.5, 0.5]
    """
    # TODO: months have 28-31 days; the fixed 30.0 divisor is approximate.
    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        return (index.day - 1) / 30.0 - 0.5
class DayOfYear(TimeFeature):
    """
    Day of year encoded as value between [-0.5, 0.5]
    """
    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        return (index.dayofyear - 1) / 365.0 - 0.5
class MonthOfYear(TimeFeature):
    """
    Month of year encoded as value between [-0.5, 0.5]
    """
    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        return (index.month - 1) / 11.0 - 0.5
class WeekOfYear(TimeFeature):
    """
    Week of year encoded as value between [-0.5, 0.5]
    """
    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        return (index.isocalendar().week - 1) / 52.0 - 0.5
def time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]:
    """
    Returns a list of time features that will be appropriate for the given frequency string.

    Args:
        freq_str (str): Frequency string of the form [multiple][granularity] such as "12H", "5min", "1D" etc.

    Raises:
        RuntimeError: if the frequency granularity is not supported.

    Returns:
        List[TimeFeature]: instantiated encoders appropriate at that granularity.
    """
    # Map each pandas offset class to the encoders that are informative at that
    # granularity — coarser frequencies need fewer features.
    features_by_offsets = {
        offsets.YearEnd: [],
        offsets.QuarterEnd: [MonthOfYear],
        offsets.MonthEnd: [MonthOfYear],
        offsets.Week: [DayOfMonth, WeekOfYear],
        offsets.Day: [DayOfWeek, DayOfMonth, DayOfYear],
        offsets.BusinessDay: [DayOfWeek, DayOfMonth, DayOfYear],
        offsets.Hour: [HourOfDay, DayOfWeek, DayOfMonth, DayOfYear],
        offsets.Minute: [
            MinuteOfHour,
            HourOfDay,
            DayOfWeek,
            DayOfMonth,
            DayOfYear,
        ],
        offsets.Second: [
            SecondOfMinute,
            MinuteOfHour,
            HourOfDay,
            DayOfWeek,
            DayOfMonth,
            DayOfYear,
        ],
    }

    offset = to_offset(freq_str)

    # isinstance matching lets e.g. "5min" (a Minute offset) hit the Minute entry.
    for offset_type, feature_classes in features_by_offsets.items():
        if isinstance(offset, offset_type):
            return [cls() for cls in feature_classes]

    supported_freq_msg = f'''
    Unsupported frequency {freq_str}

    The following frequencies are supported:

        Y - yearly
            alias: A
        M - monthly
        W - weekly
        D - daily
        B - business days
        H - hourly
        T - minutely
            alias: min
        S - secondly
    '''
    raise RuntimeError(supported_freq_msg)
def time_features(dates, timeenc = 1, freq = "h"):
    """
    > `time_features` takes in a `dates` dataframe with a 'dates' column
    > and extracts the date down to `freq` where freq can be any of the
    > following if `timeenc` is 0:
    > * m - [month]
    > * w - [month]
    > * d - [month, day, weekday]
    > * b - [month, day, weekday]
    > * h - [month, day, weekday, hour]
    > * t - [month, day, weekday, hour, *minute]
    >
    > If `timeenc` is 1, a similar, but different list of `freq` values
    > are supported (all encoded between [-0.5 and 0.5]):
    > * Q - [month]
    > * M - [month]
    > * W - [Day of month, week of year]
    > * D - [Day of week, day of month, day of year]
    > * B - [Day of week, day of month, day of year]
    > * H - [Hour of day, day of week, day of month, day of year]
    > * T - [Minute of hour*, hour of day, day of week, day of month, day of year]
    > * S - [Second of minute, minute of hour, hour of day, day of week, day of month, day of year]

    *minute returns a number from 0-3 corresponding to the 15 minute period it falls into.
    """
    if timeenc == 0:
        # plain integer calendar fields
        # NOTE(review): the positional `1` lands in Series.apply's deprecated
        # `convert_dtype` parameter (removed in pandas 3.0) — confirm pandas version.
        dates['month'] = dates.date.apply(lambda row: row.month, 1)
        dates['day'] = dates.date.apply(lambda row: row.day, 1)
        dates['weekday'] = dates.date.apply(lambda row: row.weekday(), 1)
        dates['hour'] = dates.date.apply(lambda row: row.hour, 1)
        dates['minute'] = dates.date.apply(lambda row: row.minute, 1)
        # bucket minutes into 15-minute slots (0-3)
        dates['minute'] = dates.minute.map(lambda x: x // 15)
        freq_map = {
            'y':[],
            'm':['month'],
            'w':['month'],
            'd':['month', 'day', 'weekday'],
            'b':['month', 'day', 'weekday'],
            'h':['month', 'day', 'weekday', 'hour'],
            't':['month', 'day', 'weekday', 'hour', 'minute'],
        }
        return dates[freq_map[freq.lower()]].values
    if timeenc == 1:
        dates = pd.to_datetime(dates.date.values)
        # rows = timestamps, columns = encoded features
        return np.vstack([
            feat(dates) for feat in time_features_from_frequency_str(freq)
        ]).transpose(1, 0)
    # NOTE(review): any other timeenc value falls through and returns None.
# 测试代码 main 函数
def main():
    """Ad-hoc smoke test / usage sketch.

    NOTE(review): most of this function cannot run as written — see the inline
    notes; it references names not defined in this module and a local CSV.
    """
    # NOTE(review): with the default timeenc=1, time_features accesses
    # `dates.date.values`, but DatetimeIndex.date is an ndarray with no
    # `.values` — this call likely raises AttributeError; the function expects
    # a frame with a 'date' column.
    dates = pd.to_datetime([
        "2023-01-01 01:01:05", "2023-01-01 01:01:10",
        "2023-01-01 01:01:15", "2023-01-01 01:01:20",
        "2023-01-01 01:01:25"
    ])
    res = time_features(dates, freq = "5s")
    print(res)
    res2 = time_features_from_frequency_str("5s")
    print(res2)

    data = None
    # NOTE(review): `gen_time_features` and `get_time_fe` are not defined in
    # this module (presumably renamed to time_static_features /
    # get_time_sin_cos) — NameError as written.
    data_df = gen_time_features(data)
    data_df = get_time_fe(data_df, 'hour', n = 24, one_hot = False, drop = False)
    data_df = get_time_fe(data_df, 'day', n = 31, one_hot = False, drop = True)
    data_df = get_time_fe(data_df, 'dayofweek', n = 7, one_hot = True, drop = True)
    data_df = get_time_fe(data_df, 'season', n = 4, one_hot = True, drop = True)
    data_df = get_time_fe(data_df, 'month', n = 12, one_hot = True, drop = True)
    data_df = get_time_fe(data_df, 'weekofyear', n = 53, one_hot = False, drop = True)
    # data
    # NOTE(review): hard-coded local path; `Timeseries2Dataframe` is also
    # undefined in this module.
    series = pd.read_csv(
        "/Users/zfwang/machinelearning/datasets/car-sales.csv",
        header = 0,
        index_col = 0
    )
    # gen features
    ts2df = Timeseries2Dataframe()
    series = ts2df.timeseries2dataframe(
        data = series,
        n_lag = 12,
        n_fut = 0,
        selLag = None,
        selFut = None,
        dropnan = True
    )
    ts2df.analysis_features_select(series, "Sales")
    ts2df.features_select(series, "Sales")
# Run the ad-hoc smoke test when executed as a script.
if __name__ == "__main__":
    main()
| wangzhefeng/tsproj | utils/timefeatures.py | timefeatures.py | py | 17,505 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.getcwd",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number":... |
#!/bin/python3

"""
Creates new training data for all language directions (META_LANGS) according to META_RECIPES.
This may take some time, but is written in a functional way, so the files are not loaded into memory
in larger pieces.
"""

import file_utils
from recipes import META_RECIPES
import os  # NOTE(review): unused import
import argparse

parser = argparse.ArgumentParser(description='Create experiment datasets')
parser.add_argument('-r', '--recipes', nargs='+',
                    help='Recipes', required=True)
parser.add_argument('-l', '--langs', nargs='+',
                    help='Languages', required=True)
args = parser.parse_args()
# set for O(1) membership tests in the loop below
args.recipes = set(args.recipes)

# language direction ("csen") -> original corpus pair name ("cs-en")
META_LANGS = {
    'csen': 'cs-en',
    'encs': 'cs-en',
    'ende': 'de-en',
}

for lpairtrue, lpairorig in META_LANGS.items():
    if not lpairtrue in args.langs:
        continue
    # direction string is "<src><tgt>", two letters each
    lsrc = lpairtrue[:2]
    ltgt = lpairtrue[2:]
    for meta_recipe_name, meta_recipe_generator in META_RECIPES.items():
        if not meta_recipe_name in args.recipes:
            continue
        print(f'Processing recipe #{meta_recipe_name} for {lpairtrue}')
        # assemble the keyword arguments for file_utils.load_process_save
        meta_recipe = {}
        meta_recipe['generator_partial'] = meta_recipe_generator
        meta_recipe['ftrans'] = f'teacher/train.{lpairorig}.{ltgt}'
        meta_recipe['spm_model'] = f'models/teacher/{lpairtrue}/vocab.spm'
        # create new keys and delete the old ones
        meta_recipe['fsrc'] = f'original/train.{lpairorig}.{lsrc}'
        meta_recipe['ftgt'] = f'original/train.{lpairorig}.{ltgt}'
        meta_recipe['fnew_src'] = f'experiment/{meta_recipe_name}/{lpairtrue}/train.{lpairorig}.{lsrc}'
        meta_recipe['fnew_tgt'] = f'experiment/{meta_recipe_name}/{lpairtrue}/train.{lpairorig}.{ltgt}'
        # run the job
        file_utils.load_process_save(**meta_recipe)
| zouharvi/reference-mt-distill | src/create_data.py | create_data.py | py | 1,805 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "recipes.META_RECIPES.items",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "recipes.META_RECIPES",
"line_number": 34,
"usage_type": "name"
},
{
"api_name"... |
# Parse locally saved NDTV stock pages and extract each company's key
# financials table into key_fin.csv.
import os
import pandas as pd
import time
from pathlib import Path
import shutil
from bs4 import BeautifulSoup

# loading all the files in the memory and parsing it using beautiful soup to get key financials
# problem in reading coal india:
# due to unrecognized encoding it throws an error, so we add encoding="utf8"
df=pd.read_csv('ndtv_nifty50.csv', names=['sr','link'])
df.drop('sr', axis=1, inplace=True)
df.drop(0,axis=0, inplace=True)
##stock_path=r'C:\My Program Files\Python\Python35-32\work\nse\Rediff\profit.ndtv.com\stock'
stock_path=r'C:\Program Files (x86)\Python\Python36-32\work\nse\Rediff\profit.ndtv.com\stock'
print(stock_path)
i=0
columns=[]
df1=pd.DataFrame()
try:
    for item in df['link']:
        # company slug is the 5th path segment of the NDTV URL
        company=item.split('/')[4]
        data=[]
        i=i+1
        if company!='':#'coal-india-ltd_coalindia':
            stock_index_file=Path(stock_path +'\\'+ company +'.html')
            stock_index_dir=Path(stock_path +'\\'+ company)
            if stock_index_file.is_file() and stock_index_dir.is_dir():
                print('Reading data for company '+company)
                # NOTE(review): file handle is never closed — use a context manager.
                f=open(str(stock_index_file),'r', encoding="utf8")
                html=f.read()
                soup=BeautifulSoup(html,"html.parser")
                table=soup.find(id='keyfunda')
                ticker=company.split('_')[1]
                data.append(ticker)
                # NOTE(review): 'ticker' is appended to `columns` on *every*
                # company, but `columns` is only consumed when i == 1; the list
                # keeps growing harmlessly afterwards.
                columns.append('ticker')
                for row in table.find_all('tr'):
                    j=0
                    for td in row.find_all('td'):
                        j=j+1
                        # first cell is the metric name (header), the rest are values
                        if j>1:
                            data.append(td.getText())
                        if j<=1 and i==1:
                            columns.append(td.getText())
                # first company defines the frame; later companies append rows
                # (assumes every page yields the same number of cells)
                if i==1:
                    df1=pd.DataFrame(data=[data],columns=columns)
                else:
                    df1.loc[i]=data
                # rewrite the CSV after every company so progress is not lost
                df1.to_csv('key_fin.csv')
##                if ticker=='coalindia':
##                    break
except Exception as e:
    # NOTE(review): broad catch aborts the whole scrape on the first bad page.
    print(str(e))
    # time.sleep(1)
| santoshjsh/invest | nse_7.py | nse_7.py | py | 2,312 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"lin... |
import pysubs2
import pysrt

# Load a .ass subtitle file and append every subtitle line's text to a
# plain-text file (one open/close for the whole run instead of one per line).
archivo = 'subtitle.ass'
subs = pysubs2.load(archivo, encoding='utf-8')

# 'a' keeps the original append semantics; the context manager guarantees
# the handle is flushed and closed even if iteration fails part-way.
with open('textoplano.txt', 'a') as texto:
    for line in subs:
        print(line.text)
        texto.write(line.text)
| FukurOwl/subtitles_translate | load_files.py | load_files.py | py | 260 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "pysubs2.load",
"line_number": 4,
"usage_type": "call"
}
] |
17795055591 | from typing import List
class Solution:
    def highFive(self, items: List[List[int]]) -> List[List[int]]:
        """Return [[student_id, average], ...] sorted by ascending id.

        Each ``items`` entry is ``[student_id, score]``; the average is the
        floor mean of the student's five highest scores.  As in the original,
        every student is assumed to have at least five scores.
        """
        # Group the scores by student id.
        scores = {}
        for student_id, score in items:
            scores.setdefault(student_id, []).append(score)
        # For each student (ascending id) floor-average the top five scores.
        # (The original shadowed the builtin `sum` with a local accumulator.)
        ans = []
        for student_id in sorted(scores):
            top_five = sorted(scores[student_id], reverse=True)[:5]
            ans.append([student_id, sum(top_five) // 5])
        return ans
| fastso/learning-python | leetcode_cn/solved/pg_1086.py | pg_1086.py | py | 569 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 5,
"usage_type": "name"
}
] |
72908308584 | """
Чтобы решить данную задачу, можно воспользоваться алгоритмом поиска в ширину (BFS),
так как он позволяет находить кратчайшие пути в графе с невзвешенными ребрами.
В нашем случае города и дороги между ними образуют граф без весов на ребрах.
"""
from collections import deque
# Функция для вычисления расстояния между двумя городами.
def distance(city1, city2):
    """Manhattan (L1) distance between two cities given as (x, y, z) triples."""
    return (abs(city1[0] - city2[0])
            + abs(city1[1] - city2[1])
            + abs(city1[2] - city2[2]))


# Breadth-first search: the road network is an unweighted graph where two
# cities are adjacent when their distance does not exceed max_distance.
def find_shortest_path(n, cities, max_distance, start_city, end_city):
    """Return the minimum number of roads from start_city to end_city.

    n            -- number of cities
    cities       -- list of (x, y, z) coordinate triples
    max_distance -- maximum length of a single hop (inclusive)
    start_city   -- 0-based index of the start city
    end_city     -- 0-based index of the destination city

    Returns -1 when the destination is unreachable.
    """
    visited = [False] * n
    visited[start_city] = True
    queue = deque([(start_city, 0)])  # (city index, hops travelled so far)

    while queue:
        current_city, current_distance = queue.popleft()
        if current_city == end_city:
            return current_distance  # BFS guarantees this is minimal

        for neighbor in range(n):
            # Mark cities as visited when they are ENQUEUED, not when they
            # are dequeued: the original marked on dequeue, so the same city
            # could sit in the queue many times (worst-case blow-up).
            if not visited[neighbor] and distance(cities[current_city], cities[neighbor]) <= max_distance:
                visited[neighbor] = True
                queue.append((neighbor, current_distance + 1))

    return -1  # no path exists
if __name__ == "__main__":
    # Read the input: city count, one coordinate triple per city, the fuel
    # range, and the (1-based) start/end city numbers.
    n = int(input())
    cities = [tuple(map(int, input().split())) for _ in range(n)]
    max_distance = int(input())
    start_city, end_city = map(int, input().split())
    # Run the search (converting to 0-based indices) and print the result.
    shortest_path = find_shortest_path(n, cities, max_distance, start_city - 1, end_city - 1)
    print(shortest_path)
"""
В этом примере у нас 5 городов с указанными координатами и максимальное расстояние,
которое Петя может преодолеть без дозаправки, равно 10.
Также указаны начальный город (1) и конечный город (5).
Программа выводит минимальное количество дорог, которое нужно проехать, чтобы попасть из города 1 в город 5.
python .\6_find_shortest_path.py
5
0 0 0
1 2 3
4 5 6
7 8 9
10 11 12
10
1 5
4
"""
"""
Давайте оценим сложность данного алгоритма.
Создание списка visited и инициализация его значений занимает O(n) времени и памяти, где n - количество городов.
Создание очереди queue с одним элементом занимает O(1) времени и памяти.
Внутри цикла while queue выполняется BFS по всем соседним городам.
В худшем случае BFS может обойти все города, поэтому количество итераций в цикле не превысит n.
В каждой итерации цикла выполняется проверка соседних городов и добавление их в очередь,
что занимает константное время O(1).
Таким образом, общая сложность алгоритма составляет O(n) в худшем случае,
когда все города являются соседними друг к другу и BFS посетит каждый город.
В среднем случае сложность также будет близка к O(n), поскольку BFS обычно обходит только часть графа
до достижения конечного города.
Данный алгоритм хорошо масштабируется и быстро обрабатывает города в пределах нескольких тысяч.
Однако, при очень больших значениях n алгоритм может стать медленнее из-за обхода всех возможных соседей.
""" | TatsianaPoto/yandex | ML & Programming/6_find_shortest_path.py | 6_find_shortest_path.py | py | 4,858 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 16,
"usage_type": "call"
}
] |
43301216014 | import py
from rpython.flowspace.model import Constant
from rpython.rtyper.lltypesystem import lltype
from rpython.jit.codewriter.flatten import SSARepr, Label, TLabel, Register
from rpython.jit.codewriter.flatten import ListOfKind, IndirectCallTargets
from rpython.jit.codewriter.jitcode import SwitchDictDescr
from rpython.jit.metainterp.history import AbstractDescr
def format_assembler(ssarepr):
    """For testing: format a SSARepr as a multiline string."""
    # NOTE: this is Python 2 code (cStringIO, `print >>` statements).
    from cStringIO import StringIO
    # Render one operand.  Deliberately shadows the builtin `repr` inside
    # this function so `map(repr, ...)` below uses the custom renderer.
    def repr(x):
        if isinstance(x, Register):
            return '%%%s%d' % (x.kind[0], x.index) # e.g. %i1 or %r2 or %f3
        elif isinstance(x, Constant):
            # Struct pointers get a readable form instead of the raw repr.
            if (isinstance(x.concretetype, lltype.Ptr) and
                isinstance(x.concretetype.TO, lltype.Struct)):
                return '$<* struct %s>' % (x.concretetype.TO._name,)
            return '$%r' % (x.value,)
        elif isinstance(x, TLabel):
            return getlabelname(x)
        elif isinstance(x, ListOfKind):
            return '%s[%s]' % (x.kind[0].upper(), ', '.join(map(repr, x)))
        elif isinstance(x, SwitchDictDescr):
            return '<SwitchDictDescr %s>' % (
                ', '.join(['%s:%s' % (key, getlabelname(lbl))
                           for key, lbl in x._labels]))
        elif isinstance(x, (AbstractDescr, IndirectCallTargets)):
            return '%r' % (x,)
        else:
            return '<unknown object: %r>' % (x,)
    # First pass: collect every label that is actually referenced (as a
    # jump target or inside a switch descr); value -1 means "not yet
    # assigned a display number".
    seenlabels = {}
    for asm in ssarepr.insns:
        for x in asm:
            if isinstance(x, TLabel):
                seenlabels[x.name] = -1
            elif isinstance(x, SwitchDictDescr):
                for _, switch in x._labels:
                    seenlabels[switch.name] = -1
    # Labels are numbered L1, L2, ... in order of first use; the one-element
    # list is the Python-2 way to mutate the counter from the closure.
    labelcount = [0]
    def getlabelname(lbl):
        if seenlabels[lbl.name] == -1:
            labelcount[0] += 1
            seenlabels[lbl.name] = labelcount[0]
        return 'L%d' % seenlabels[lbl.name]
    output = StringIO()
    insns = ssarepr.insns
    # A trailing ('---',) sentinel instruction is not shown.
    if insns and insns[-1] == ('---',):
        insns = insns[:-1]
    for i, asm in enumerate(insns):
        # When bytecode positions are known, prefix each line with them.
        if ssarepr._insns_pos:
            prefix = '%4d ' % ssarepr._insns_pos[i]
        else:
            prefix = ''
        if isinstance(asm[0], Label):
            # Unreferenced labels are skipped entirely.
            if asm[0].name in seenlabels:
                print >> output, prefix + '%s:' % getlabelname(asm[0])
        else:
            print >> output, prefix + asm[0],
            if len(asm) > 1:
                # Instructions with a result use the 'args -> result' form.
                if asm[-2] == '->':
                    if len(asm) == 3:
                        print >> output, '->', repr(asm[-1])
                    else:
                        lst = map(repr, asm[1:-2])
                        print >> output, ', '.join(lst), '->', repr(asm[-1])
                else:
                    lst = map(repr, asm[1:])
                    # -live- ranges are order-insensitive: sort for stable output.
                    if asm[0] == '-live-': lst.sort()
                    print >> output, ', '.join(lst)
            else:
                print >> output
    res = output.getvalue()
    return res
def assert_format(ssarepr, expected):
    """Check that *ssarepr* formats exactly to *expected*.

    On the first differing line an AssertionError is raised whose message
    shows both lines plus a caret marker under the first mismatching
    character.  *expected* is dedented via py.code.Source first.
    """
    actual = format_assembler(ssarepr)
    if expected != '':
        expected = str(py.code.Source(expected)).strip() + '\n'
    actual_lines = actual.split("\n")
    expected_lines = expected.split("\n")
    for got, exp in zip(actual_lines, expected_lines):
        if got == exp:
            continue
        # Length of the common prefix positions the caret marker.
        lgt = 0
        while lgt < min(len(got), len(exp)) and exp[lgt] == got[lgt]:
            lgt += 1
        msg = [""]
        msg.append("Got: " + got)
        msg.append("Expected: " + exp)
        msg.append(" " + " " * lgt + "^^^^")
        raise AssertionError('\n'.join(msg))
    # zip() stops at the shorter sequence, so also require equal line counts.
    assert len(actual_lines) == len(expected_lines)
def unformat_assembler(text, registers=None):
    # Inverse of format_assembler: parse a multiline assembler string back
    # into a SSARepr.  *registers* maps '%i0'-style names to Register
    # objects; passing a dict in lets the caller share registers across
    # calls and inspect the ones that were created.
    # XXX limited to simple assembler right now
    #
    # Parse a single operand token back into its object form.
    def unformat_arg(s):
        if s.endswith(','):
            s = s[:-1].rstrip()
        if s[0] == '%':
            # Register: reuse an existing one or create it on first sight.
            try:
                return registers[s]
            except KeyError:
                num = int(s[2:])
                if s[1] == 'i': reg = Register('int', num)
                elif s[1] == 'r': reg = Register('ref', num)
                elif s[1] == 'f': reg = Register('float', num)
                else: raise AssertionError("bad register type")
                registers[s] = reg
                return reg
        elif s[0] == '$':
            # Constant: only plain signed integers are supported here.
            intvalue = int(s[1:])
            return Constant(intvalue, lltype.Signed)
        elif s[0] == 'L':
            return TLabel(s)
        elif s[0] in 'IRF' and s[1] == '[' and s[-1] == ']':
            # List of operands of one kind, e.g. I[%i0, %i1].
            items = split_words(s[2:-1])
            items = map(unformat_arg, items)
            return ListOfKind({'I': 'int', 'R': 'ref', 'F': 'float'}[s[0]],
                              items)
        elif s.startswith('<SwitchDictDescr '):
            # e.g. <SwitchDictDescr 4:L1, 5:L2> -> [(4, L1), (5, L2)]
            assert s.endswith('>')
            switchdict = SwitchDictDescr()
            switchdict._labels = []
            items = split_words(s[len('<SwitchDictDescr '):-1])
            for item in items:
                key, value = item.split(':')
                value = value.rstrip(',')
                switchdict._labels.append((int(key), TLabel(value)))
            return switchdict
        else:
            raise AssertionError("unsupported argument: %r" % (s,))
    #
    if registers is None:
        registers = {}
    ssarepr = SSARepr('test')
    for line in text.splitlines():
        line = line.strip()
        if not line:
            continue
        if line.startswith('L') and line.endswith(':'):
            # A label definition line, e.g. "L1:".
            ssarepr.insns.append((Label(line[:-1]),))
        else:
            # Split off the opcode name; ValueError means it has no args.
            try:
                opname, line = line.split(None, 1)
            except ValueError:
                opname, line = line, ''
            words = list(split_words(line))
            # A '-> result' suffix becomes trailing ['->', reg] elements.
            if '->' in words:
                assert words.index('->') == len(words) - 2
                extra = ['->', unformat_arg(words[-1])]
                del words[-2:]
            else:
                extra = []
            insn = [opname] + [unformat_arg(s) for s in words] + extra
            ssarepr.insns.append(tuple(insn))
    return ssarepr
def split_words(line):
    """Split *line* on spaces, keeping bracketed groups together.

    Spaces inside '<...>', '(...)' or '[...]' do not end a word, so e.g.
    "I[%i0, %i1]" or "<SwitchDictDescr 4:L1, 5:L2>" each come out as a
    single word.  The '>' of a ' -> ' result arrow is not a closing bracket.
    """
    word = ''
    nested = 0
    for i, c in enumerate(line):
        if c == ' ' and nested == 0:
            if word:
                yield word
                word = ''
        else:
            word += c
        if c in '<([':
            nested += 1
        if c in '])>':
            # Skip the '>' that belongs to a ' -> ' result arrow.  The
            # original checked (' '+line)[i:i+4] != ' -> ', which is off by
            # one (the slice is prev,c,next,next2 and can never equal
            # ' -> '), so every arrow drove `nested` negative and tripped
            # the assertion below.
            is_arrow = (c == '>' and i >= 2 and line[i - 2:i + 2] == ' -> ')
            if not is_arrow:
                nested -= 1
                assert nested >= 0
    if word:
        yield word
    assert nested == 0
| mozillazg/pypy | rpython/jit/codewriter/format.py | format.py | py | 6,680 | python | en | code | 430 | github-code | 36 | [
{
"api_name": "rpython.jit.codewriter.flatten.Register",
"line_number": 15,
"usage_type": "argument"
},
{
"api_name": "rpython.flowspace.model.Constant",
"line_number": 17,
"usage_type": "argument"
},
{
"api_name": "rpython.rtyper.lltypesystem.lltype.Ptr",
"line_number": 18,
... |
14365200747 | #Import dependencies
import json
import pandas as pd
import numpy as np
import re
from sqlalchemy import create_engine
import psycopg2
from config import db_password
import time
#Set file directory (hard-coded absolute path to the author's raw-data folder)
file_dir = '/Users/mariacarter/Desktop/Berkeley-Bootcamp/Analysis-Projects/Movies-ETL/Resources/'
def process_ETL(wiki_movies, kaggle_metadata, ratings):
    """Extract, transform and load the three movie data sources.

    wiki_movies     -- file name (inside file_dir) of the Wikipedia movies JSON
    kaggle_metadata -- file name of the Kaggle movies_metadata CSV
    ratings         -- file name of the MovieLens ratings CSV

    Cleans the Wikipedia data, reconciles it with the Kaggle metadata, and
    writes the merged `movies` table plus the chunked `ratings` table to the
    `movie_data` PostgreSQL database via SQLAlchemy.
    NOTE(review): the parameters are file *names*; the same local names are
    rebound below to the loaded DataFrames.
    """
    with open (f'{file_dir}/'+wiki_movies, mode='r') as file:
        wiki_movies_raw = json.load(file)
    kaggle_metadata = pd.read_csv(f'{file_dir}/'+kaggle_metadata, low_memory=False)
    ratings = pd.read_csv(f'{file_dir}/'+ratings)
    #Use a list comprehension to filter data
    # (keep only proper movies: has a director, has an IMDb link, not a TV series)
    wiki_movies = [movie for movie in wiki_movies_raw
                if ('Director' in movie or 'Directed by' in movie)
                and 'imdb_link' in movie
                and 'No. of episodes' not in movie]
    #Loop through every key, add the alt_titles dict to the movie object
    def clean_movie(movie):
        movie = dict(movie) # create a non-destructive copy
        alt_titles = {}
        #Combine alternate titles into one list
        for key in ['Also known as', 'Arabic', 'Cantonese', 'Chinese', 'French',
                    'Hangul', 'Hebrew', 'Hepburn', 'Japanese', 'Literally',
                    'Mandarin', 'McCune–Reischauer', 'Original title', 'Polish',
                    'Revised Romanization', 'Romanized', 'Russian',
                    'Simplified', 'Traditional', 'Yiddish']:
            if key in movie:
                alt_titles[key] = movie[key]
                movie.pop(key)
        if len(alt_titles) > 0:
            movie['alt_titles'] = alt_titles
        #Merge column names
        def change_column_name(old_name, new_name):
            if old_name in movie:
                movie[new_name] = movie.pop(old_name)
        change_column_name('Adaptation by', 'Writer(s)')
        change_column_name('Country of origin', 'Country')
        change_column_name('Directed by', 'Director')
        change_column_name('Distributed by', 'Distributor')
        change_column_name('Edited by', 'Editor(s)')
        change_column_name('Length', 'Running time')
        change_column_name('Original release', 'Release date')
        change_column_name('Music by', 'Composer(s)')
        change_column_name('Produced by', 'Producer(s)')
        change_column_name('Producer', 'Producer(s)')
        change_column_name('Productioncompanies', 'Production company(s)')
        change_column_name('Productioncompanies ', 'Production company(s)')
        change_column_name('Productioncompany', 'Production company(s)')
        change_column_name('Productioncompany ', 'Production company(s)')
        change_column_name('Released', 'Release date')
        change_column_name('Released Date', 'Release date')
        change_column_name('Screen story by', 'Writer(s)')
        change_column_name('Screenplay by', 'Writer(s)')
        change_column_name('Story by', 'Writer(s)')
        change_column_name('Theme music composer', 'Composer(s)')
        change_column_name('Written by', 'Writer(s)')
        return movie
    #Use a list comprehension to make a list of clean movies
    clean_movies = [clean_movie(movie) for movie in wiki_movies]
    #Create a Wiki Movies DF from the clean movies dataset
    wiki_movies_df = pd.DataFrame(clean_movies)
    #Extract IMDb ID
    wiki_movies_df['imdb_id'] = wiki_movies_df['imdb_link'].str.extract(r'(tt\d{7})')
    #Drop duplicate IMDb IDs
    wiki_movies_df.drop_duplicates(subset= 'imdb_id', inplace=True)
    #Use a list comprehension to remove mostly null columns from the Wiki Movies DF
    wiki_columns_to_keep = [column for column in wiki_movies_df.columns
                            if wiki_movies_df[column].isnull().sum() < len(wiki_movies_df) * 0.9]
    #Create a revised Wiki Movies DF from the updated data
    wiki_movies_df = wiki_movies_df[wiki_columns_to_keep]
    #Drop 'Box Office' from dataset, converting lists to strings
    box_office = wiki_movies_df['Box office'].dropna().apply(lambda x: ''.join(x) if type(x) == list else x)
    #Create forms in the 'Box Office' data and use regular expressions to parse the data
    # form_one matches "$123.4 million/billion"; form_two matches "$123,456,789"
    form_one = r'\$\s*\d+\.?\d*\s*[mb]illi?on'
    form_two = r'\$\s*\d{1,3}(?:[,\.]\d{3})+(?!\s[mb]illion)'
    box_office = box_office.str.replace(r'\$.*[-—–](?![a-z])', '$', regex=True)
    #Extract & convert the 'Box Office' values
    # NOTE(review): the result of this expression is discarded — presumably
    # left over from notebook exploration.
    box_office.str.extract(f'({form_one}|{form_two})')
    def parse_dollars(s):
        #If s is not a string, return NaN
        if type(s) != str:
            return np.nan
        #If input is of the form $###.# million
        if re.match(r'\$\s*\d+\.?\d*\s*milli?on', s, flags=re.IGNORECASE):
            #Remove dollar sign and " million"
            s = re.sub('\$|\s|[a-zA-Z]', '', s)
            #Convert to float and multiply by a million
            value = float(s) * 10**6
            #Return value
            return value
        #If input is of the form $###.# billion
        elif re.match('\$\s*\d+\.?\d*\s*billi?on', s, flags=re.IGNORECASE):
            #Remove dollar sign and " billion"
            s = re.sub('\$|\s|[a-zA-Z]', '', s)
            #Convert to float and multiply by a billion
            value = float(s) * 10**9
            #Return value
            return value
        #If input is of the form $###,###,###
        elif re.match(r'\$\s*\d{1,3}(?:[,\.]\d{3})+(?!\s[mb]illion)', s, flags=re.IGNORECASE):
            #Remove dollar sign and commas
            s = re.sub('\$|,','', s)
            #Convert to float
            value = float(s)
            #Return value
            return value
        #Otherwise, return NaN
        else:
            return np.nan
    #Extract the values from 'Box Office' using str.extract & apply parse_dollars to the 1st column
    wiki_movies_df['box_office'] = box_office.str.extract(f'({form_one}|{form_two})', flags=re.IGNORECASE)[0].apply(parse_dollars)
    #Drop the 'Box Office' column
    wiki_movies_df.drop('Box office', axis=1, inplace=True)
    #Drop 'Budget' from dataset, converting lists to strings:
    budget = wiki_movies_df['Budget'].dropna().apply(lambda x: ''.join(x) if type(x) == list else x)
    #Remove any values betwen a dollar sign & a hyphen in 'Budget'
    budget = budget.str.replace(r'\$.*[-—–](?![a-z])', '$', regex=True)
    #Remove any values betwen a dollar sign & a hyphen in 'Budget'
    # NOTE(review): this replace is an exact duplicate of the line above.
    budget = budget.str.replace(r'\$.*[-—–](?![a-z])', '$', regex=True)
    #Use same pattern matches to parse 'Budget'
    matches_form_one = budget.str.contains(form_one, flags=re.IGNORECASE)
    matches_form_two = budget.str.contains(form_two, flags=re.IGNORECASE)
    # NOTE(review): result discarded (notebook-style inspection).
    budget[~matches_form_one & ~matches_form_two]
    #Remove citation references
    budget = budget.str.replace(r'\[\d+\]s*','')
    # NOTE(review): result discarded (notebook-style inspection).
    budget[~matches_form_one & ~matches_form_two]
    #Parse the 'Budget' values
    wiki_movies_df['budget'] = budget.str.extract(f'({form_one}|{form_two})', flags=re.IGNORECASE)[0].apply(parse_dollars)
    #Drop the 'Budget' column
    wiki_movies_df.drop('Budget', axis=1, inplace=True)
    #Drop 'Release date' from dataset, converting lists to strings:
    release_date = wiki_movies_df['Release date'].dropna().apply(lambda x: ''.join(x) if type(x)== list else x)
    #Parse the forms
    # Four accepted date layouts: "Month D, YYYY", "YYYY-MM-DD", "Month YYYY", "YYYY".
    date_form_one = r'(?:January|February|March|April|May|June|July|August|September|October|November|December)\s[123]\d,\s\d{4}'
    date_form_two = r'\d{4}.[01]\d.[123]\d'
    date_form_three = r'(?:January|February|March|April|May|June|July|August|September|October|November|December)\s\d{4}'
    date_form_four = r'\d{4}'
    #Extract the dates
    # NOTE(review): result discarded (notebook-style inspection).
    release_date.str.extract(f'({date_form_one}|{date_form_two}|{date_form_three}|{date_form_four})', flags=re.IGNORECASE)
    #Use built-in to_datetime() to parse the dates, and set the infer_datetime_format option to 'True' because there are different date formats.
    wiki_movies_df['release_date'] = pd.to_datetime(release_date.str.extract(f'({date_form_one}|{date_form_two}|{date_form_three}|{date_form_four})')[0], infer_datetime_format=True)
    #Drop 'Running time' from dataset, converting lists to strings:
    running_time = wiki_movies_df['Running time'].dropna().apply(lambda x: ' '.join(x) if type(x) == list else x)
    #Extract digits, and allow for both possible patterns by adding capture groups around the \d instances and add an alternating character
    running_time_extract = running_time.str.extract(r'(\d+)\s*ho?u?r?s?\s*(\d*)|(\d+)\s*m')
    #Convert from string to numeric
    running_time_extract = running_time_extract.apply(lambda col: pd.to_numeric(col, errors='coerce')).fillna(0)
    #Apply a function that converts the 'hour' and 'minute' capture groups to 'minutes' if the pure minutes capture group is zero, and save the output to wiki_movies_df
    wiki_movies_df['running_time'] = running_time_extract.apply(lambda row: row[0]*60 + row[1] if row[2] == 0 else row[2], axis=1)
    #Drop 'running time'
    wiki_movies_df.drop('Running time', axis=1, inplace=True)
    #Remove bad data from Kaggle Metadata DF
    # NOTE(review): result discarded (notebook-style inspection).
    kaggle_metadata[~kaggle_metadata['adult'].isin(['True','False'])]
    #Keep rows where adult=False, then drop the adult column
    kaggle_metadata = kaggle_metadata[kaggle_metadata['adult'] == 'False'].drop('adult', axis='columns')
    #Convert data to since 'video' are T/F values
    kaggle_metadata['video'] = kaggle_metadata['video'] == 'True'
    #For numeric columns, use to_numeric() method.
    #Make sure errors= argument is set to 'raise' so that we know if theres data that can't be converted to numbers
    kaggle_metadata['budget'] = kaggle_metadata['budget'].astype(int)
    kaggle_metadata['id'] = pd.to_numeric(kaggle_metadata['id'], errors='raise')
    kaggle_metadata['popularity'] = pd.to_numeric(kaggle_metadata['popularity'], errors='raise')
    #Convert 'release_date' to datetime using to_datetime()
    kaggle_metadata['release_date'] = pd.to_datetime(kaggle_metadata['release_date'])
    #Since there's so many rows that are null in the Ratings DF, set the null_counts = True
    ratings.info(null_counts=True)
    #Specify in to_datetime() that the origin is 'unix' and the time unit is seconds, and assign it to the 'timestamp; column
    ratings['timestamp'] = pd.to_datetime(ratings['timestamp'], unit='s')
    #Merge Wikipedia & Kaggle Metadata
    movies_df = pd.merge(wiki_movies_df, kaggle_metadata, on='imdb_id', suffixes=['_wiki', '_kaggle'])
    #Drop the wild outlier (aka 'The Holiday') from Wikipedia data
    movies_df = movies_df.drop(movies_df[(movies_df['release_date_wiki'] > '1996-01-01') & (movies_df['release_date_kaggle'] < '1965-01-01')].index)
    #Convert the 'Languge' list to a tuple so that .value_counts() can work
    # NOTE(review): results of the next two lines are discarded (inspection).
    movies_df['Language'].apply(lambda x: tuple(x) if type(x) == list else x).value_counts(dropna=False)
    movies_df['original_language'].value_counts(dropna=False)
    #Drop the title_wiki, release_date_wiki, Language, and Production company(s) columns
    movies_df.drop(columns=['title_wiki', 'release_date_wiki', 'Language', 'Production company(s)'], inplace=True)
    #Make a function that fills in missing data for a column pair and then drops the redundant column
    def fill_missing_kaggle_data(df, kaggle_column, wiki_column):
        df[kaggle_column] = df.apply(
            lambda row: row[wiki_column] if row[kaggle_column] == 0 else row[kaggle_column]
            , axis=1)
        df.drop(columns=wiki_column, inplace=True)
    #Run the function for the three column pairs that were decided to be filled with zeros
    fill_missing_kaggle_data(movies_df, 'runtime', 'running_time')
    fill_missing_kaggle_data(movies_df, 'budget_kaggle', 'budget_wiki')
    fill_missing_kaggle_data(movies_df, 'revenue', 'box_office')
    #Check that there aren’t any columns with only one value, and convert lists to tuples for value_counts() to work.
    for col in movies_df.columns:
        lists_to_tuples = lambda x: tuple(x) if type(x) == list else x
        value_counts = movies_df[col].apply(lists_to_tuples).value_counts(dropna=False)
        num_values = len(value_counts)
        if num_values == 1:
            print(col)
    # NOTE(review): result discarded (notebook-style inspection).
    movies_df['video'].value_counts(dropna=False)
    #Reorder the columns
    movies_df = movies_df[['imdb_id','id','title_kaggle','original_title','tagline','belongs_to_collection','url','imdb_link',
                        'runtime','budget_kaggle','revenue','release_date_kaggle','popularity','vote_average','vote_count',
                        'genres','original_language','overview','spoken_languages','Country',
                        'production_companies','production_countries','Distributor',
                        'Producer(s)','Director','Starring','Cinematography','Editor(s)','Writer(s)','Composer(s)','Based on'
                        ]]
    #Rename the columns
    movies_df.rename({'id':'kaggle_id',
                    'title_kaggle':'title',
                    'url':'wikipedia_url',
                    'budget_kaggle':'budget',
                    'release_date_kaggle':'release_date',
                    'Country':'country',
                    'Distributor':'distributor',
                    'Producer(s)':'producers',
                    'Director':'director',
                    'Starring':'starring',
                    'Cinematography':'cinematography',
                    'Editor(s)':'editors',
                    'Writer(s)':'writers',
                    'Composer(s)':'composers',
                    'Based on':'based_on'
                    }, axis='columns', inplace=True)
    #Count how many times a movie received a given rating
    rating_counts = ratings.groupby(['movieId','rating'], as_index=False).count() \
                    .rename({'userId':'count'}, axis=1) \
                    .pivot(index='movieId',columns='rating', values='count')
    #Rename the columns... prepend rating_ to each column with a list comprehension:
    rating_counts.columns = ['rating_' + str(col) for col in rating_counts.columns]
    #Connect Pandas to SQL
    db_string = f"postgres://postgres:{db_password}@127.0.0.1:5432/movie_data"
    engine = create_engine(db_string)
    #Import the movie data
    movies_df.to_sql(name='movies', con=engine)
    #Create a variable for the number of rows imported
    rows_imported = 0
    #Get the start_time from time.time()
    start_time = time.time()
    for data in pd.read_csv(f'{file_dir}ratings.csv', chunksize=1000000):
        #Print out the range of rows that are being imported
        print(f'importing rows {rows_imported} to {rows_imported + len(data)}...', end='')
        # NOTE(review): if_exists='replace' drops and recreates the table on
        # EVERY chunk, so only the final chunk survives — this likely should
        # be 'append' after the first chunk; confirm intended behavior.
        data.to_sql(name='ratings', con=engine, index=False, if_exists='replace')
        #Increment the number of rows imported by the size of 'data'
        rows_imported += len(data)
        #Add elapsed time to final printout
        print(f'Done. {time.time() - start_time} total seconds elapsed')
# Run the complete ETL pipeline on the three raw input files in file_dir.
process_ETL("wikipedia.movies.json", "movies_metadata.csv", "ratings.csv")
{
"api_name": "json.load",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"lin... |
3733422218 | from argparse import Namespace
from pandas.core.frame import DataFrame
from app.command.sub_command import SubCommand
from app.error.column_already_exists_error import ColumnAlreadyExistsError
from app.error.column_not_found_error import ColumnNotFoundError
class Add(SubCommand):
    """Sub-command that inserts a new column into a DataFrame.

    The column is filled with a default value and placed either first,
    directly after an existing column, or (by default) last.
    """

    def __init__(self, args: Namespace) -> None:
        super().__init__(args)
        self.column = args.column    # name of the column to add
        self.default = args.default  # value assigned to every row
        self.first = args.first      # place the new column first?
        self.after = args.after      # ...or right after this column

    def process(self, df: DataFrame) -> DataFrame:
        """Return *df* with the new column inserted at the requested position.

        Raises ColumnAlreadyExistsError when the column already exists and
        ColumnNotFoundError when the --after column is missing.
        """
        headers = list(df.columns.values)
        # Validate the request before touching the frame.
        if self.column in headers:
            raise ColumnAlreadyExistsError(
                "column `{}` already exist".format(self.column))
        if self.after and self.after not in headers:
            raise ColumnNotFoundError(
                "column `{}` is not found".format(self.after))
        # Append the column's data, then reorder to the requested position.
        df[self.column] = self.default
        ordered = list(headers)
        if self.first:
            position = 0
        elif self.after:
            position = headers.index(self.after) + 1
        else:
            position = len(ordered)
        ordered.insert(position, self.column)
        return df[ordered]
| takenoco82/alter_csv | src/app/command/add.py | add.py | py | 1,355 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "app.command.sub_command.SubCommand",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "argparse.Namespace",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "pandas.core.frame.DataFrame",
"line_number": 18,
"usage_type": "name"
},
{
... |
71551407784 | import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
from textClustringAnalysis.feature.common import dict2Array, myTFIDF
from textClustringAnalysis.feature.main import TC, TC_PCA, PCA
from textClustringAnalysis.preprocessor.dataInfo import getWordCount
if __name__ == '__main__':
    # (Translation of the note below: when classifying high-dimensional data
    # of unknown separability, project it to 2-D/3-D with t-SNE and look:
    # separable in the low-dimensional view implies the data is separable;
    # not separable there may mean either inseparable data or merely that it
    # cannot be projected down.)
    """
    当我们想要对高维数据进行分类,又不清楚这个数据集有没有很好的可分性(即同类之间间隔小,异类之间间隔大)
    可以通过t - SNE投影到2维或者3维的空间中观察一下。如果在低维空间中具有可分性,则数据是可分的;
    如果在高维空间中不具有可分性,可能是数据不可分,也可能仅仅是因为不能投影到低维空间。
    """
    # For each corpus: build word counts, reduce features (TC then PCA),
    # project to 2-D with t-SNE, and save the scatter plot.
    for i in ['txt1', 'txt2']:  # ['txt_about1','txt_about2']:
        # outDir = '/Users/brobear/PycharmProjects/TextClusteringAnalysis/txt1'
        outDir = '/Users/brobear/PycharmProjects/TextClusteringAnalysis/' + i
        txt_dict = getWordCount(outDir)
        # tfidf_dict = myTFIDF(txt_dict, itc=True)
        # data, textNames, wordName = dict2Array(tfidf_dict)
        data, textNames = TC_PCA(txt_dict, minTC=5, itc=False, topN=0.8)[:2]
        tsne = TSNE(n_components=2)
        a = tsne.fit_transform(data)  # reduce the data to two dimensions
        plt.scatter(a[:, 0], a[:, 1], s=2, alpha=1)
        title = '%s TC5_PCA0_8' % i
        plt.title(title)
        plt.savefig('/Users/brobear/PycharmProjects/TextClusteringAnalysis/textClustringAnalysis/tsne-images/%s'
                    '.png' % title)
        plt.show()
    # Experiment log: feature counts / scores for other parameter choices.
    # TC_PCA(txt_dict, minTC=5, itc=True,topN=0.8)[:2] 680
    # TC_PCA(txt_dict, minTC=5, itc=False,topN=0.8)[:2] 497
    # PCA(txt_dict, itc=False, topN=0.8)[:2] 1198
    # PCA(txt_dict, itc=True, topN=0.8)[:2] 1171
    # data, textNames = TC(txt_dict, topN=1100)[:2] 1100 txt1:37.64
    # data, textNames = TC(txt_dict, topN=600)[:2] 600 txt1:47.00
    # TC_PCA(txt_dict, minTC=5, itc=True,topN=0.8)[:2] 680
    # minTC 0 5 10 37.64 38.67 47.00 52.74
    # txt1 36731 3638 2684 1100 600
    # txt2 29958 3305 2503 1100 600
| bearbro/TextClusteringAnalysis | textClustringAnalysis/showdatafirst.py | showdatafirst.py | py | 2,127 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "textClustringAnalysis.preprocessor.dataInfo.getWordCount",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "textClustringAnalysis.feature.main.TC_PCA",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sklearn.manifold.TSNE",
"line_number": 21,... |
35206645553 | from django.contrib import admin
from .models import *
class CostInline(admin.TabularInline):
    # Inline editor so Cost rows can be edited on the parent admin page.
    model = Cost
    extra = 0  # no blank extra forms by default
class CoordinatesAdmin(admin.ModelAdmin):
    # Columns shown in the changelist view.
    list_display = ('latitude', 'longitude')
class LanguageAdmin(admin.ModelAdmin):
    # Columns shown in the changelist view.
    list_display = ('name', 'population')
class CountryAdmin(admin.ModelAdmin):
    # Two-pane widget for the many-to-many languages field.
    filter_horizontal = ('languages',)
    # Columns shown in the changelist view.
    list_display = ('name', 'emoji', 'population', 'is_marker')
class CityAdmin(admin.ModelAdmin):
    list_display = ('name', 'country', 'population', 'hemisphere', 'continent', 'coastal', 'elevation', 'coordinates')

    def save_model(self, request, obj, form, change):
        """Geocode the city via Mapbox unless coordinates were entered manually.

        Builds a "Country, City[, district...]" query, takes the first Mapbox
        forward-geocoding hit and stores it as a Coordinates row on the city.
        On any geocoding failure (missing token, network error, empty result)
        the city is saved without coordinates, as before.
        """
        from mapbox import Geocoder
        import json
        import os
        if not form.cleaned_data['manual_coordinates']:
            # A comma-separated name is reversed so the broadest part
            # (after the country) comes first in the Mapbox query.
            if ', ' in form.cleaned_data['name']:
                location_list = form.cleaned_data['name'].split(', ')
                location_list.reverse()
                name = "{0}, {1}".format(form.cleaned_data['country'].name, ', '.join(location_list))
            else:
                name = "{0}, {1}".format(form.cleaned_data['country'].name, form.cleaned_data['name'])
            try:
                geocoder = Geocoder(access_token=os.getenv('MAPBOX_ACCESS_KEY'))
                response = geocoder.forward(name)
                mapbox_coords = response.json()['features'][0]['center']  # [lon, lat]
                coordinates = Coordinates.objects.create(longitude=mapbox_coords[0], latitude=mapbox_coords[1])
                coordinates.save()
                obj.coordinates = coordinates
                super(CityAdmin, self).save_model(request, obj, form, change)
            except Exception:
                # Best-effort fallback: the original bare "except:" also
                # swallowed SystemExit/KeyboardInterrupt; Exception keeps the
                # save-anyway behaviour without hiding those.
                super(CityAdmin, self).save_model(request, obj, form, change)
        else:
            super(CityAdmin, self).save_model(request, obj, form, change)
class SenderAdmin(admin.ModelAdmin):
    # Columns shown in the changelist view.
    list_display = ('name', 'country', 'gender', 'is_avatar')
    # Bulk admin action: recompute achievements for the selected senders.
    actions = ['update_achievements']
    def update_achievements(self, request, queryset):
        # Delegates to the model; assumes Sender.update_achievements persists
        # its own changes -- TODO confirm against the model definition.
        for sender in queryset:
            sender.update_achievements()
# class FormatPaperAdmin(admin.ModelAdmin):
# class TagAdmin(admin.ModelAdmin):
class NewspaperAdmin(admin.ModelAdmin):
    filter_horizontal = ('senders', 'tags',)
    list_display = ('title', 'city', 'id', 'number', 'number_2', 'date', 'language', 'is_photo', 'is_thumbnail', 'top')
    inlines = [CostInline]

    @staticmethod
    def _auto_tag_id(name):
        """Get or create the Tag called *name*, save it, and return its id."""
        tag, created = Tag.objects.get_or_create(name=name)
        tag.save()
        return tag.id

    def save_model(self, request, obj, form, change):
        """Attach automatically derived tags to the newspaper, then save.

        The manually chosen tags are kept and extended with tags computed
        from the newspaper itself (year, continent, colour, ...); the final
        tag set is re-read from the database ordered by name.
        (The original repeated the get_or_create/save/append triple eleven
        times; the helper above factors that out.)
        """
        tag_ids = [tag.id for tag in form.cleaned_data['tags']]
        # Unconditional tags: publication year, continent, colour.
        tag_ids.append(self._auto_tag_id(obj.date.year))
        tag_ids.append(self._auto_tag_id(obj.city.continent))
        tag_ids.append(self._auto_tag_id(obj.color))
        # Conditional tags derived from city and newspaper properties.
        if obj.city.coastal:
            tag_ids.append(self._auto_tag_id('Coastal'))
        if obj.pravda():
            tag_ids.append(self._auto_tag_id('Правда'))
        if obj.metro():
            tag_ids.append(self._auto_tag_id('Metro'))
        if obj.not_official_language():
            tag_ids.append(self._auto_tag_id('Not Official Language'))
        if obj.city.is_polar():
            tag_ids.append(self._auto_tag_id('Polar'))
        if obj.frequency:
            tag_ids.append(self._auto_tag_id(obj.frequency))
        if obj.type_newspaper != 'Newspaper':
            tag_ids.append(self._auto_tag_id(obj.type_newspaper))
        if obj.format_paper:
            tag_ids.append(self._auto_tag_id(obj.format_paper.name))
        form.cleaned_data['tags'] = Tag.objects.order_by('name').filter(id__in=tag_ids)
        super(NewspaperAdmin, self).save_model(request, obj, form, change)
class CurrencyAdmin(admin.ModelAdmin):
list_display = ('name', 'symbol', 'code', 'order')
admin.site.register(Achievement)
admin.site.register(Coordinates, CoordinatesAdmin)
admin.site.register(Language, LanguageAdmin)
admin.site.register(Country, CountryAdmin)
admin.site.register(City, CityAdmin)
admin.site.register(Sender, SenderAdmin)
admin.site.register(FormatPaper)
admin.site.register(Tag)
admin.site.register(Newspaper, NewspaperAdmin)
admin.site.register(Currency, CurrencyAdmin)
admin.site.register(Cost)
| borisovodov/np | app/admin.py | admin.py | py | 4,586 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.contrib.admin.TabularInline",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 10,
"usage_type": "attribute"... |
35809219103 | #! python2
# -*- coding: utf-8 -*-
import scrapy
import csv
import time
from sys import exit
import os
import logging
from scrapy import signals
from . import wikimallbottodbmy
import re
#from scrapy.utils.log import configure_logging
class WikimallbotSpider(scrapy.Spider):
name = 'wikimallbot'
allowed_domains = ['id.wikipedia.org']
start_urls = ['https://id.wikipedia.org/wiki/Daftar_pusat_perbelanjaan_di_Indonesia']
dir_path = os.path.dirname(os.path.realpath(__file__))
#configure_logging(install_root_handler = False)
#logging.basicConfig (
# filename = dir_path + '/../out/wikimall_log.txt',
# format = '%(levelname)s: %(message)s',
# level = logging.WARNING
#)
timestr = time.strftime("%Y%m%d-%H%M%S")
filename1 = dir_path + '/../out/wikimall_%s.csv' % timestr
filename2 = dir_path + '/../out/wikimall_detail_%s.csv' % timestr
filename3 = dir_path + '/../out/wikimall_links.csv'
fieldnames = ['id_ai','prov','kabkot','nama_mall','detail_link']
fieldnames_detail = ['nama_mall','alamat','lokasi','pemilik','pengembang','pengurus','tanggal_dibuka','jumlah_toko_dan_jasa','jumlah_toko_induk','total_luas_pertokoan','jumlah_lantai','parkir','situs_web','kantor','didirikan','industri','akses_transportasi_umum','pendapatan','arsitek']
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
spider = super(WikimallbotSpider, cls).from_crawler(crawler, *args, **kwargs)
crawler.signals.connect(spider.spider_closed, signal=signals.spider_closed)
return spider
def spider_closed(self, spider):
spider.logger.info('Signal sent then Spider closed. file out is : %s', self.filename1)
#self.connect()
#bumntodb.readcsvandupdate(self.allowed_domains[0],self.filename1)
wikimallbottodbmy.readcsvandupdate(self.allowed_domains[0],self.filename1)
wikimallbottodbmy.readcsvandupdate(self.allowed_domains[0],self.filename2)
# saving to mysql should load here
def parse(self, response):
#mw-headline
myyield = {'id_ai': 1}
open(self.filename3, 'a').close()
with open(self.filename2, 'a') as f:
w = csv.DictWriter(f, self.fieldnames_detail, lineterminator='\n', delimiter='|')
w.writeheader()
with open(self.filename1, 'a') as f: # Just use 'w' mode in 3.x
iterasi = 1
rows = response.css('div.mw-parser-output')
prov = ''
kabkot = ''
for row in rows.css('*') :
if row.xpath('name()').get() == 'h2' :
#myyield['id_ai'] = iterasi
myyield['prov'] = row.css('::text').get()
prov = row.css('::text').get()
#myyield['test'] = row.css('::text').get()
subiterasi = 1
if row.xpath('name()').get() == 'dl' :
if row.css('dt > a::text') :
myyield['id_ai'] = subiterasi
myyield['prov'] = prov
myyield['kabkot'] = row.css('dt > a::text').get()
kabkot = row.css('dt > a::text').get()
if row.xpath('name()').get() == 'li' :
if row.css('li') and row.css('li *::text') :
myyield['id_ai'] = iterasi
myyield['prov'] = prov.encode('utf-8')
myyield['kabkot'] = kabkot.encode('utf-8')
myyield['nama_mall'] = re.sub(r'[^\x00-\x7F]+',' ', (row.css('li *::text').get().encode('utf-8')))
if row.css('li > a::attr(href)') :
detail_link = response.urljoin(row.css('li > a::attr(href)').get().encode('utf-8'))
if 'index.php' not in detail_link :
myyield['detail_link'] = detail_link.encode('utf-8')
#yield scrapy.Request(detail_link.encode('utf-8'), self.parse_detail, meta={'timestr':timestr,'iterasi':iterasi,'row':myyield})
#with open(self.dir_path + '/../out/wikimall_links.csv', 'a') as f2:
# w2 = csv.DictWriter(f2, self.fieldnames, lineterminator='\n', delimiter='|')
# w2.writerow(myyield)
else :
myyield['detail_link'] = ''
else :
myyield['detail_link'] = ''
#link_detail = response.urljoin(link_detail)
iterasi += 1
subiterasi += 1
w = csv.DictWriter(f, self.fieldnames, lineterminator='\n', delimiter='|')
if iterasi ==2 :
w.writeheader()
w.writerow(myyield)
with open(self.filename3, 'a') as f2:
w2 = csv.DictWriter(f2, self.fieldnames, lineterminator='\n', delimiter='|')
if iterasi ==2 :
w2.writeheader()
w2.writerow(myyield)
for link in response.css('div.mw-parser-output li > a::attr(href)').getall() :
if 'index.php' not in link :
if ':' not in link.encode('utf-8') :
yield scrapy.Request(response.urljoin(link.encode('utf-8')), self.parse_detail)
#def parse_detail(self, response) :
# print(response.css('table.infobox tr').get())
def parse_detail(self,response) :
myyield = {'nama_mall': response.css('h1.firstHeading::text').get()}
with open(self.filename2, 'a') as f:
if response.css('table.infobox tr') :
rows = response.css('table.infobox tr')
for row in rows :
if row.css('th::text') and row.css('td *::text') :
#self.log('key file %s' % row.css('th::text').get())
if row.css('th::text').get().encode('utf-8').lower().replace(" ", "_").replace("/", "_").replace(",", "||") in self.fieldnames_detail :
if len(row.css('td *::text').getall()) > 1 :
myyield[row.css('th::text').get().encode('utf-8').lower().replace(" ", "_").replace("/", "_").replace(",", "||")] = re.sub(r'[^\x00-\x7F]+',' ', (' '.join(t.encode('utf-8').replace("\n", "").strip() for t in row.css('td *::text').getall()).strip()))
else :
myyield[row.css('th::text').get().encode('utf-8').lower().replace(" ", "_").replace("/", "_").replace(",", "||")] = re.sub(r'[^\x00-\x7F]+',' ', (row.css('td *::text').get().encode('utf-8').replace("\n", "")))
else :
myyield['alamat'] = ''
myyield['lokasi'] = ''
myyield['pemilik'] = ''
myyield['pengembang'] = ''
myyield['pengurus'] = ''
myyield['tanggal_dibuka'] = ''
myyield['jumlah_toko_dan_jasa'] = ''
myyield['jumlah_toko_induk'] = ''
myyield['total_luas_pertokoan'] = ''
myyield['jumlah_lantai'] = ''
myyield['parkir'] = ''
myyield['situs_web'] = ''
myyield['kantor'] = ''
myyield['didirikan'] = ''
myyield['industri'] = ''
myyield['akses_transportasi_umum'] = ''
myyield['pendapatan'] = ''
myyield['arsitek'] = ''
w = csv.DictWriter(f, self.fieldnames_detail, lineterminator='\n', delimiter='|')
#if response.meta.get('iterasi') ==2 :
# w.writeheader()
w.writerow(myyield) | rizanurhadi/webscraping1 | spiders/wikimallbot.py | wikimallbot.py | py | 7,971 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scrapy.Spider",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
... |
69845996905 | #!/usr/bin/env python
import pika
from pika.adapters import BlockingConnection
from pika import BasicProperties
#connection = BlockingConnection('172.20.14.192')
connection = pika.BlockingConnection(pika.ConnectionParameters('172.20.14.192'))
channel = connection.channel()
client_params = {"x-ha-policy": "all"}
exchange_name = 'public'
queue_name = 'test_queue1'
routing_key = 'test_routing_key1'
channel.exchange_declare(exchange=exchange_name, type='topic')
channel.queue_declare(queue=queue_name, durable=True, arguments=client_params )
channel.queue_bind(exchange=exchange_name, queue=queue_name, routing_key=routing_key)
connection.close()
| appop/simple-test | createmessage/createqueue.py | createqueue.py | py | 654 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "pika.BlockingConnection",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pika.ConnectionParameters",
"line_number": 7,
"usage_type": "call"
}
] |
6435166822 | from collections import Counter
def maketemp(l,h):
templist = []
for i in range(l,h+1):
templist.append(i)
return templist
def themode(lst):
n = len(lst)
data = Counter(lst)
get_mode = dict(data)
mode = [k for k, v in get_mode.items() if v == max(list(data.values()))]
if len(mode) == n:
return -1
else:
return mode[0]
minions = int(input())
listoflist = []
listoftemps = []
rooms = 0
for x in range(minions):
x,y = [int(x) for x in input().split()]
listoflist.append(maketemp(x,y))
listoftemps += maketemp(x,y)
listoftemps.sort()
print(listoflist)
print(listoftemps)
while themode(listoftemps) != -1:
mode = themode(listoftemps)
print(mode)
for x in range(len(listoflist)-1,-1,-1):
if mode in listoflist[x]:
for y in listoflist[x]:
listoftemps.remove(y)
del listoflist[x]
print(listoflist)
print(listoftemps)
rooms += 1
rooms += len(listoflist)
print(rooms)
| DongjiY/Kattis | src/airconditioned.py | airconditioned.py | py | 1,012 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "collections.Counter",
"line_number": 11,
"usage_type": "call"
}
] |
30134924939 | import numpy as np
import datetime as dt
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, and_
from flask import Flask, jsonify
engine = create_engine("sqlite:///hawaii.sqlite")
base = automap_base()
base.prepare(engine, reflect=True)
measurement = base.classes.measurement
station = base.classes.station
app = Flask(__name__)
@app.route("/")
def welcome():
"""List all available api routes."""
return(
f"Available Routes:<br/>"
f"/api/v1.0/precipitation<br/>"
f"/api/v1.0/stations<br/>"
f"/api/v1.0/tobs<br/>"
f"/api/v1.0/<start><br/>"
f"/api/v1.0/<start>/<end>"
)
@app.route("/api/v1.0/precipitation")
def precipitation():
session=Session(engine)
"""Shows all dates and precipitation data"""
precip = session.query(measurement.date, measurement.prcp).\
order_by(measurement.date).all()
precip_dict = []
for date, prcp in precip:
new_dict = {}
new_dict["date"] = date
new_dict["prcp"] = prcp
precip_dict.append(new_dict)
return jsonify(precip_dict)
session.close()
@app.route("/api/v1.0/stations")
def stations():
session = Session(engine)
"""Show all station names"""
stat = session.query(measurement.station).all()
stat_names = []
for station in stat:
all_stations = {}
all_stations["station"] = station
stat_names.append(all_stations)
return jsonify(stat_names)
session.close()
@app.route("/api/v1.0/tobs")
def tobs():
session = Session(engine)
"""Dates and temperature observations of most active station"""
most_active = session.query(measurement.station, func.count(measurement.station)).group_by(measurement.station).\
order_by(func.count(measurement.station).desc()).all()
most_active_station = most_active[0][0]
last_year = session.query(measurement.date, measurement.tobs).filter(measurement.date >= "2016-03-23").\
filter(measurement.station == most_active_station).order_by(measurement.date).all()
date_list = []
for date, tobs in last_year:
tobs_date = {}
tobs_date[date] = tobs
date_list.append(tobs_date)
return jsonify(date_list)
session.close()
| Emziicles/sqlalchemy-challenge | app.py | app.py | py | 2,364 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlalchemy.create_engine",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.ext.automap.automap_base",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 19,
"usage_type": "call"
},
{
"api_name... |
2503764043 | #!/usr/bin/env python
# *********************
# webcam video stream
# *********************
import time
import cv2
# 0 - Dell Webcam
# cap = cv2.VideoCapture('filename.avi')
cap = cv2.VideoCapture(0)
pTime = 0
while (cap.isOpened()):
cTime = time.time()
ret, frame = cap.read()
if ret == True:
fps = int(1/(cTime - pTime))
cv2.putText(frame, f'FPS: {int(fps)}', (420, 40), cv2.FONT_HERSHEY_PLAIN, 2, (255, 255, 1), 3)
cv2.imshow('Frame', frame)
pTime = cTime
print(fps)
if cv2.waitKey(25) & 0xFF == ord('q'): break
else: break
cap.release()
cv2.destroyAllWindows()
| ammarajmal/cam_pose_pkg | script/webcam.py | webcam.py | py | 640 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_PLAIN",
"... |
4877559988 |
import os
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
import pandas
data = pandas.read_csv('C:\\Users\\aruny\\Downloads\\ezyzip\\MusicApp\\songs.csv')
data = data.to_dict('records')
song_names = [i['name'] for i in data]
dir = 'C:\playlist songs\Malayalam_sad_songs'
files = os.listdir(dir)
for song in files:
downloaded_name = song.replace('_', ' ')
downloaded_name = downloaded_name.replace('.mp3', '')
print(downloaded_name)
max_ratio = 60
song_id = -1
for i in range(983,1053):
# db_song_name = songs.query.filter_by(id=i).first().song_name
ratio = fuzz.ratio(song_names[i], downloaded_name)
#print(db_song_name, " --> ", ratio)
if ratio > max_ratio:
max_ratio = ratio
song_id = i+1
print(song_id)
if song_id == -1:
print(song, " is not found")
else:
old = dir+'//'+song
new = dir+'//'+str(song_id)+'.mp3'
old = old.replace('//', '/')
new = new.replace('//', '/')
#print(old)
#print(new)
os.rename(old, new) | chmson/MusicApp_website | MusicApp/give_id.py | give_id.py | py | 1,094 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "fuzzywuzzy.fuzz.ratio",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "fuzzywuzzy.fuzz",
... |
10603730701 | import argparse
import time , datetime
from pythonosc import udp_client
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--ip", default="127.0.0.1",
help="The ip of the OSC server")
parser.add_argument("--port", type=int, default=9000,
help="The port the OSC server is listening on")
args = parser.parse_args()
client = udp_client.SimpleUDPClient(args.ip, args.port)
#base time is set to 7am 0 min 0 sec , to be compare to now datetime
base = datetime.time(7, 0, 0)
if __name__ == '__main__':
while True:
# when realtime is beyond 7am then set timer osc parameter to true
dt_now = datetime.datetime.now()
now = dt_now.time()
if base > now :
client.send_message("/avatar/parameters/timer", 0)
print("timer off ")
else:
client.send_message("/avatar/parameters/timer", 1)
print("timer on ")
time.sleep(60)
| kakuchrome/OSCscript | timer.py | timer.py | py | 968 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pythonosc.udp_client.SimpleUDPClient",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pythonosc.udp_client",
"line_number": 14,
"usage_type": "name"
},
{
"... |
7410224509 | from bs4 import BeautifulSoup
import requests
from selenium import webdriver
import re
import timeit
url = "https://www.investing.com/equities/trending-stocks"
driver = webdriver.Chrome(r"C:\Program Files\chromedriver.exe")
driver.get(url)
# x, stockPopularityData
page = driver.page_source
soup = BeautifulSoup(page, 'html.parser')
print(soup)
a = list(soup.find_all("a", {"class": "bold block"}))
for i in a:
a3 = list(str(i).split('>'))
a4 = a3[1].split('<')
print(str(a4[0]))
a = list(soup.find_all('script'))
# print(a)
b = str(a[43])
# Stock Popularity Data
print("Stock Popularity Data")
a1 = re.findall(r"stockPopularityData = .*;", b)
str = a1[0].replace(';', '')
str = str.split('=')
str1 = str[1].lstrip()
str3 = eval(str1)
for value1 in str3.values():
for key, values in value1.items():
print(key, values)
# Sector Popularity Data
print("Sector Popularity Data")
a3 = re.findall(r"sectorPopularityData = .*;", b)
str = a3[0].replace(';', '')
str = str.split('=')
str1 = str[1].lstrip()
str3 = eval(str1)
for value1 in str3.values():
for key, values in value1.items():
print(key, values)
# Trending Stock Quota by Price
print(soup.find_all('table', class_='genTbl closedTbl elpTbl elp20 crossRatesTbl'))
# Trending Stock Quota by Performance
python_button = driver.find_elements_by_xpath(xpath=r"/html/body/div[5]/section/div[7]/div/div[7]/div/a[2]")[0]
python_button.click()
# x, stockPopularityData
page = driver.page_source
soup = BeautifulSoup(page, 'html.parser')
# print(soup)
print(soup.find_all('table', class_='genTbl openTbl recentQuotesSideBlockTbl collapsedTbl elpTbl elp30'))
# Trending Stock Quota by Technical
python_button = driver.find_elements_by_xpath(xpath=r"/html/body/div[5]/section/div[7]/div/div[7]/div/a[3]")[0]
python_button.click()
# x, stockPopularityData
page = driver.page_source
soup = BeautifulSoup(page, 'html.parser')
print(soup)
| SRI-VISHVA/WebScrapping | scrapping_73.py | scrapping_73.py | py | 1,915 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "re.finda... |
1355556431 | import pygame
import os
import time
import random
x = 100
y = 50
os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (x,y)
width = 1640
height = 980
pygame.init()
screen = pygame.display.set_mode((width, height))
white = (255, 255, 255)
black = (0, 0, 0)
random_color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
screen.fill(black)
pygame.display.set_caption("Sorting Algorithm Visualizer")
def createArr(bar_size):
i = 0
while i < width:
value = random.randint(0, height)
pygame.draw.rect(screen, white, (i, height - value, bar_size, height))
arr.append(value)
i += bar_size
pygame.display.flip()
return arr
def bubbleSort(arr, bar_size):
pygame.display.set_caption("Bubble Sort")
for iter_number in range(len(arr) - 1, 0, -1):
for index in range(iter_number):
if arr[index] > arr[index + 1]:
temp = arr[index]
arr[index] = arr[index + 1]
arr[index + 1] = temp
if index % 80 == 0:
x = 0
screen.fill(black)
for j in range(len(arr)):
pygame.draw.rect(screen, white, (x, height - arr[j], bar_size, height))
x += bar_size
pygame.display.flip()
x = 0
screen.fill(black)
for j in range(len(arr)):
pygame.draw.rect(screen, white, (x, height - arr[j], bar_size, height))
x += bar_size
pygame.display.flip()
screen.fill(black)
def insertionSort(arr, bar_size):
pygame.display.set_caption("Insertion Sort")
for i in range(1, len(arr)):
j = i - 1
nxt_element = arr[i]
while (arr[j] > nxt_element) and (j >= 0):
arr[j+1] = arr[j]
j -= 1
arr[j + 1] = nxt_element
x = 0
screen.fill(black)
for j in range(len(arr)):
pygame.draw.rect(screen, white, (x, height - arr[j], bar_size, height))
x += bar_size
pygame.display.flip()
screen.fill(black)
def shellSort(arr, bar_size):
pygame.display.set_caption("Shell Sort")
gap = len(arr) // 2
while gap > 0:
for i in range(gap, len(arr)):
temp = arr[i]
j = i
while j >= gap and arr[j - gap] > temp:
arr[j] = arr[j - gap]
j -= gap
arr[j] = temp
if i % 5 == 0:
x = 0
screen.fill(black)
for j in range(len(arr)):
pygame.draw.rect(screen, white, (x, height - arr[j], bar_size, height))
x += bar_size
pygame.display.flip()
gap = gap // 2
x = 0
screen.fill(black)
for j in range(len(arr)):
pygame.draw.rect(screen, white, (x, height - arr[j], bar_size, height))
x += bar_size
pygame.display.flip()
screen.fill(black)
def selectionSort(arr, bar_size):
pygame.display.set_caption("Selection Sort")
for idx in range(len(arr)):
min_idx = idx
for j in range( idx + 1, len(arr)):
if arr[min_idx] > arr[j]:
min_idx = j
arr[idx], arr[min_idx] = arr[min_idx], arr[idx]
x = 0
screen.fill((0, 0, 0))
for j in range(len(arr)):
pygame.draw.rect(screen, white, (x, height - arr[j], bar_size, height))
x += bar_size
pygame.display.flip()
screen.fill(black)
bar_size = 3
arr = []
arr = createArr(bar_size)
time.sleep(1)
bubbleSort(arr, bar_size)
time.sleep(1)
arr = []
arr = createArr(bar_size)
time.sleep(1)
insertionSort(arr, bar_size)
arr = []
arr = createArr(bar_size)
time.sleep(1)
shellSort(arr, bar_size)
arr = []
arr = createArr(bar_size)
time.sleep(1)
selectionSort(arr, bar_size)
time.sleep(1)
pygame.quit() | MagicLuxa/Python-Projects | sorting algorithms visualized.py | sorting algorithms visualized.py | py | 4,028 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pygame.init",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pygame.display",
... |
37914230085 | from pathlib import Path
import json
import plotly.express as px
import numpy as np
# Read data as a string and convert to a Python object.
path = Path("eq_data/eq_data.geojson")
contents = path.read_text(encoding="utf-8")
all_eq_data = json.loads(contents)
# Examine all the earthquakes in dataset
all_eq_dicts = all_eq_data["features"]
mags, lons, lats, eq_titles = [], [], [], []
for eq_dict in all_eq_dicts:
mag = eq_dict["properties"]["mag"]
lon = eq_dict["geometry"]["coordinates"][0]
lat = eq_dict["geometry"]["coordinates"][1]
eq_title = eq_dict["properties"]["title"]
mags.append(mag)
lons.append(lon)
lats.append(lat)
eq_titles.append(eq_title)
# Normalize magnitudes
normalized_mags = np.array(mags)
normalized_mags = (normalized_mags - np.min(normalized_mags)) / (
np.max(normalized_mags) - np.min(normalized_mags)
)
# Adjust marker size scaling
marker_scaling_factor = 10
scaled_marker_sizes = normalized_mags * marker_scaling_factor
title = "Global Magnitude 4.5+ Earthquakes, Past Month"
fig = px.scatter_geo(
lat=lats,
lon=lons,
title=title,
size=scaled_marker_sizes,
size_max=15,
color=mags,
color_continuous_scale="dense",
labels={"color": "Magnitude", 'lon':"Longitude", 'lat':'Latitude'},
projection="natural earth",
hover_name=eq_titles,
)
# Customize hover label format
hover_template = (
"<b>%{hovertext}</b><br>"
"<b>Magnitude:</b> %{marker.color:.2f}<br>"
"<b>Longitude:</b> %{lon}<br>"
"<b>Latitude:</b> %{lat}<extra></extra>"
)
fig.update_traces(hovertemplate=hover_template)
fig.show()
| hharpreetk/python-earthquake-data-viz | eq_explore_data.py | eq_explore_data.py | py | 1,615 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 28,... |
23413820054 | # -*- coding: utf-8 -*-
"""
Create doc tree if you follows
:ref:`Sanhe Sphinx standard <en_sphinx_doc_style_guide>`.
"""
from __future__ import print_function
import json
from pathlib_mate import PathCls as Path
from .template import TC
from .pkg import textfile
class ArticleFolder(object):
"""
Represent an ``index.rst`` or ``index.ipynb`` file with a Title in a directory.
:param index_file: the index file name (no file extension)
:param dir_path: A folder contains single rst file. The rst file path
**中文文档**
一篇 Article 代表着文件夹中有一个 ``index.rst`` 或 ``index.ipynb`` 文件的文件夹.
其中必然有至少一个标题元素.
"""
DEFAULT_INDEX_FILE = "index"
def __init__(self, index_file=None, dir_path=None):
if index_file is None:
index_file = self.DEFAULT_INDEX_FILE
self.index_file = index_file
self.dir_path = dir_path
self._title = None
@property
def rst_path(self):
"""
The actual rst file absolute path.
"""
return Path(self.dir_path, self.index_file + ".rst").abspath
@property
def ipynb_path(self):
"""
The actual ipynb file absolute path.
"""
return Path(self.dir_path, self.index_file + ".ipynb").abspath
@property
def rel_path(self):
"""
File relative path from the folder.
"""
return "{}/{}".format(Path(self.dir_path).basename, self.index_file)
@property
def title(self):
"""
Title for the first header.
"""
if self._title is None:
if Path(self.rst_path).exists():
self._title = self.get_title_from_rst()
elif Path(self.ipynb_path).exists():
self._title = self.get_title_from_ipynb()
else:
pass
return self._title
def get_title_from_rst(self):
"""
Get title line from .rst file.
**中文文档**
从一个 ``_filename`` 所指定的 .rst 文件中, 找到顶级标题.
也就是第一个 ``====`` 或 ``----`` 或 ``~~~~`` 上面一行.
"""
header_bar_char_list = "=-~+*#^"
lines = list()
for cursor_line in textfile.readlines(self.rst_path, strip="both", encoding="utf-8"):
if cursor_line.startswith(".. include::"):
relative_path = cursor_line.split("::")[-1].strip()
included_path = Path(Path(self.rst_path).parent.abspath, relative_path)
if included_path.exists():
cursor_line = included_path.read_text(encoding="utf-8")
lines.append(cursor_line)
rst_content = "\n".join(lines)
cursor_previous_line = None
for cursor_line in rst_content.split("\n"):
for header_bar_char in header_bar_char_list:
if cursor_line.startswith(header_bar_char):
flag_full_bar_char = cursor_line == header_bar_char * len(cursor_line)
flag_line_length_greather_than_1 = len(cursor_line) >= 1
flag_previous_line_not_empty = bool(cursor_previous_line)
if flag_full_bar_char \
and flag_line_length_greather_than_1 \
and flag_previous_line_not_empty:
return cursor_previous_line.strip()
cursor_previous_line = cursor_line
msg = "Warning, this document doesn't have any %s header!" % header_bar_char_list
return None
def get_title_from_ipynb(self):
"""
Get title line from .ipynb file.
**中文文档**
从一个 ``_filename`` 所指定的 .ipynb 文件中, 找到顶级标题.
也就是第一个 ``#`` 后面的部分
"""
data = json.loads(Path(self.ipynb_path).read_text())
for row in data["cells"]:
if len(row["source"]):
content = row["source"][0]
line = content.split("\n")[0]
if "# " in line:
return line[2:].strip()
msg = "Warning, this document doesn't have any level 1 header!"
return None
@property
def sub_article_folders(self):
"""
Returns all valid ArticleFolder sitting inside of
:attr:`ArticleFolder.dir_path`.
"""
l = list()
for p in Path.sort_by_fname(
Path(self.dir_path).select_dir(recursive=False)
):
af = ArticleFolder(index_file=self.index_file, dir_path=p.abspath)
try:
if af.title is not None:
l.append(af)
except:
pass
return l
def toc_directive(self, maxdepth=1):
"""
Generate toctree directive text.
:param table_of_content_header:
:param header_bar_char:
:param header_line_length:
:param maxdepth:
:return:
"""
articles_directive_content = TC.toc.render(
maxdepth=maxdepth,
article_list=self.sub_article_folders,
)
return articles_directive_content
def __repr__(self):
return "Article(index_file=%r, title=%r)" % (self.index_file, self.title,)
| MacHu-GWU/docfly-project | docfly/doctree.py | doctree.py | py | 5,328 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib_mate.PathCls",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "pathlib_mate.PathCls",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "pathlib_mate.PathCls",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "pathli... |
16567280409 | import tensorflow as tf
import keras
import numpy as np
import matplotlib.pyplot as plt
from keras.layers import Input, Dense, Lambda, InputLayer, concatenate, Dropout
from keras.models import Model, Sequential
from keras import backend as K
from keras import metrics
from keras.datasets import mnist
from keras.utils import np_utils
# Start tf session so we can run code.
sess = tf.InteractiveSession()
# Connect keras to the created session.
K.set_session(sess)
def vlb_binomial(x, x_decoded_mean, t_mean, t_log_var):
"""Returns the value of negative Variational Lower Bound
The inputs are tf.Tensor
x: (batch_size x number_of_pixels) matrix with one image per row with zeros and ones
x_decoded_mean: (batch_size x number_of_pixels) mean of the distribution p(x | t), real numbers from 0 to 1
t_mean: (batch_size x latent_dim) mean vector of the (normal) distribution q(t | x)
t_log_var: (batch_size x latent_dim) logarithm of the variance vector of the (normal) distribution q(t | x)
Returns:
A tf.Tensor with one element (averaged across the batch), VLB
"""
vlb = tf.reduce_mean(
tf.reduce_sum(
x * tf.log(x_decoded_mean + 1e-19)
+ (1 - x) * tf.log(1 - x_decoded_mean + 1e-19),
axis=1,
)
- 0.5
* tf.reduce_sum(-t_log_var + tf.exp(t_log_var) + tf.square(t_mean) - 1, axis=1)
)
return -vlb
def create_encoder(input_dim):
# Encoder network.
# We instantiate these layers separately so as to reuse them later
encoder = Sequential(name="encoder")
encoder.add(InputLayer([input_dim]))
encoder.add(Dense(intermediate_dim, activation="relu"))
encoder.add(Dense(2 * latent_dim))
return encoder
def create_decoder(input_dim):
# Decoder network
# We instantiate these layers separately so as to reuse them later
decoder = Sequential(name="decoder")
decoder.add(InputLayer([input_dim]))
decoder.add(Dense(intermediate_dim, activation="relu"))
decoder.add(Dense(original_dim, activation="sigmoid"))
return decoder
# Sampling from the distribution
# q(t | x) = N(t_mean, exp(t_log_var))
# with reparametrization trick.
def sampling(args):
"""Returns sample from a distribution N(args[0], diag(args[1]))
The sample should be computed with reparametrization trick.
The inputs are tf.Tensor
args[0]: (batch_size x latent_dim) mean of the desired distribution
args[1]: (batch_size x latent_dim) logarithm of the variance vector of the desired distribution
Returns:
A tf.Tensor of size (batch_size x latent_dim), the samples.
"""
t_mean, t_log_var = args
# YOUR CODE HERE
epsilon = K.random_normal(t_mean.shape)
z = epsilon * K.exp(0.5 * t_log_var) + t_mean
return z
batch_size = 100
original_dim = 784 # Number of pixels in MNIST images.
latent_dim = 3 # d, dimensionality of the latent code t.
intermediate_dim = 128 # Size of the hidden layer.
epochs = 20
x = Input(batch_shape=(batch_size, original_dim))
encoder = create_encoder(original_dim)
get_t_mean = Lambda(lambda h: h[:, :latent_dim])
get_t_log_var = Lambda(lambda h: h[:, latent_dim:])
h = encoder(x)
t_mean = get_t_mean(h)
t_log_var = get_t_log_var(h)
t = Lambda(sampling)([t_mean, t_log_var])
decoder = create_decoder(latent_dim)
x_decoded_mean = decoder(t)
| tirthasheshpatel/Generative-Models | vae.py | vae.py | py | 3,413 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tensorflow.InteractiveSession",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "keras.backend.set_session",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": ... |
17079421444 | from glob import glob
from itertools import chain
import logging
from lib.language import canonicalize_tokens, clean_text, clean_tokens, tokenize_text
from lib.language.types import TokenStream
from lib.util import chain as chain_calls, extract_text
logger = logging.getLogger(__name__)
def tokenize_corpus(globby_path: str) -> TokenStream:
"""Convert a directory full of files into a stream of lemmatized tokens"""
return chain.from_iterable(
chain_calls(
extract_text,
clean_text,
tokenize_text,
clean_tokens,
list, # must force eager evaluation here for lemmatizing
canonicalize_tokens,
)(bill)
for bill
in glob(globby_path)
)
| amy-langley/tracking-trans-hate-bills | lib/tasks/tokenize_corpus.py | tokenize_corpus.py | py | 752 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "itertools.chain.from_iterable",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "itertools.chain",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "lib.u... |
34922299042 | """
shared dataloader for multiple issues
must gurantee that a batch only has data from same issue
but the batches can be shuffled
"""
import collections
from more_itertools import more
from numpy.core import overrides
import torch
from torch import tensor
import torch.nn as nn
import numpy as np
from torch.utils.data import Dataset
from typing import *
from myprompt.data.example import InputExample, InputFeatures
from torch.utils.data._utils.collate import default_collate
from tqdm.std import tqdm
from torch.utils.data import DataLoader
from torch.utils.data.dataset import ConcatDataset, Dataset
from myprompt.utils.logging import logger
from transformers.tokenization_utils import PreTrainedTokenizer
from transformers.utils.dummy_pt_objects import PreTrainedModel
from myprompt.template import Template
from myprompt.verbalizer import Verbalizer
from myprompt.plm.utils import TokenizerWrapper
from collections import defaultdict
from myprompt.utils.logging import logger
from myprompt.utils.utils import round_list, signature
from torch.utils.data.sampler import RandomSampler, Sampler
import random
import math
class MultiDataLoader(object):
def __init__(self, dataloaders, shuffle_batch=True):
super().__init__()
self.dataloaders = dataloaders
self.batches = sum([list(iter(self.dataloaders[k])) for k in self.dataloaders],[])
if shuffle_batch:
random.shuffle(self.batches)
def __len__(self):
return len(self.batches)
def __iter__(self):
return (b for b in self.batches)
class myDataset(Dataset):
def __init__(self, data):
self.samples = data
def __getitem__(self, index):
return self.samples[index]
def __len__(self):
return len(self.samples)
class BatchSchedulerSampler(Sampler):
"""
iterate over tasks and provide a random batch per task in each mini-batch
"""
def __init__(self, dataset, batch_size):
self.dataset = dataset
self.batch_size = batch_size
self.number_of_datasets = len(dataset.datasets)
self.largest_dataset_size = max([len(cur_dataset.samples) for cur_dataset in dataset.datasets])
def __len__(self):
return self.batch_size * math.ceil(self.largest_dataset_size / self.batch_size) * len(self.dataset.datasets)
def __iter__(self):
samplers_list = []
sampler_iterators = []
for dataset_idx in range(self.number_of_datasets):
cur_dataset = self.dataset.datasets[dataset_idx]
sampler = RandomSampler(cur_dataset)
samplers_list.append(sampler)
cur_sampler_iterator = sampler.__iter__()
sampler_iterators.append(cur_sampler_iterator)
push_index_val = [0] + self.dataset.cumulative_sizes[:-1]
step = self.batch_size * self.number_of_datasets
samples_to_grab = self.batch_size
# for this case we want to get all samples in dataset, this force us to resample from the smaller datasets
epoch_samples = self.largest_dataset_size * self.number_of_datasets
final_samples_list = [] # this is a list of indexes from the combined dataset
for _ in range(0, epoch_samples, step):
random_idx= list(range(self.number_of_datasets))
random.shuffle(random_idx)
#for i in range(self.number_of_datasets):
for i in random_idx:
cur_batch_sampler = sampler_iterators[i]
cur_samples = []
for _ in range(samples_to_grab):
try:
cur_sample_org = cur_batch_sampler.__next__()
cur_sample = cur_sample_org + push_index_val[i]
cur_samples.append(cur_sample)
except StopIteration:
# got to the end of iterator - restart the iterator and continue to get samples
# until reaching "epoch_samples"
sampler_iterators[i] = samplers_list[i].__iter__()
cur_batch_sampler = sampler_iterators[i]
cur_sample_org = cur_batch_sampler.__next__()
cur_sample = cur_sample_org + push_index_val[i]
cur_samples.append(cur_sample)
final_samples_list.extend(cur_samples)
return iter(final_samples_list)
class PromptShareDataLoader(object):
def __init__(self,
dataset: Dict[str, List],
template: Dict[str, Template],
tokenizer: PreTrainedTokenizer,
tokenizer_wrapper_class: TokenizerWrapper,
max_seq_length: Optional[str] = 512,
batch_size: Optional[int] = 1,
shuffle: Optional[bool] = False, #shuffle_sample
shuffle_batch: Optional[bool] = True, #shuffle_batch
teacher_forcing: Optional[bool] = False,
decoder_max_length: Optional[int] = -1,
predict_eos_token: Optional[bool] = False,
truncate_method: Optional[str] = "tail",
**kwargs,
):
self.raw_dataset = dataset
self.wrapped_dataset = collections.defaultdict(list)
self.tensor_dataset = collections.defaultdict(list)
self.template = template
self.batch_size = batch_size
self.shuffle = shuffle
self.shuffle_batch = shuffle_batch
self.teacher_forcing = teacher_forcing
tokenizer_wrapper_init_keys = signature(tokenizer_wrapper_class.__init__).args
prepare_kwargs = {
"max_seq_length" : max_seq_length,
"truncate_method" : truncate_method,
"decoder_max_length" : decoder_max_length,
"predict_eos_token" : predict_eos_token,
"tokenizer" : tokenizer,
**kwargs,
}
to_pass_kwargs = {key: prepare_kwargs[key] for key in prepare_kwargs if key in tokenizer_wrapper_init_keys}
self.tokenizer_wrapper = tokenizer_wrapper_class(**to_pass_kwargs)
#check the wrap function
for k in self.template:
assert hasattr(self.template[k], 'wrap_one_example'), "Your prompt template has no function variable \
named wrap_one_example"
#process: 2 main steps of dataloader
self.wrap()
self.tokenize()
concat_dataset = ConcatDataset([myDataset(self.tensor_dataset[k]) for k in self.tensor_dataset])
self.dataloader = DataLoader(
concat_dataset,
batch_size = self.batch_size,
sampler = BatchSchedulerSampler(concat_dataset, batch_size=batch_size),
collate_fn = InputFeatures.collate_fct
)
def wrap(self):
"""
wrap the text with template
"""
if isinstance(self.raw_dataset, Dict):
for k in self.raw_dataset:
for idx, example in enumerate(self.raw_dataset[k]):
wrapped_example = self.template[k].wrap_one_example(example)
self.wrapped_dataset[k].append(wrapped_example)
else:
raise NotImplementedError
def tokenize(self):
"""
Pass the wraped text into a prompt-specialized tokenizer
"""
for k in self.wrapped_dataset:
for idx, wrapped_example in tqdm(enumerate(self.wrapped_dataset[k]), desc ='tokenizing'):
inputfeatures = InputFeatures(**self.tokenizer_wrapper.tokenize_one_example(wrapped_example, self.teacher_forcing), **wrapped_example[1]).to_tensor()
self.tensor_dataset[k].append(inputfeatures)
def __len__(self):
return len(self.dataloader)
def __iter__(self):
return self.dataloader.__iter__()
| xymou/Frame_Detection | myprompt/myprompt/data/share_dataloader.py | share_dataloader.py | py | 7,930 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "random.shuffle",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.dataset.Dataset",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.sampler.Sampler",
"line_number": 60,
"usage_type": "name"
},
{
"... |
3327955898 | import argparse
import re
_re_pattern_value_unit = r"^\s*(-?\d+(?:.\d+)?)\s*([a-zA-Z]*?[\/a-zA-Z]*)$"
# this regex captures the following example normal(3s,1ms) with/without spaces between parenthesis and comma(s)
# it's also able to capture the sample duration specified after the distribution e.g., normal(3s,1ms) H 2ms with/without spaces
_re_pattern_compact_distribution = re.compile(
r'^(?P<family>[a-z]+\b)\s*\(\s*(?P<args>-?\d*\.?\d+\s*[a-z]*\b(\s*,\s*-?\d*\.?\d+\s*[a-z]*\b)*)\s*\)\s*(H\s*(?P<sample_duration>\d*\.?\d+\s*[a-z]*)\b)*$')
_re_pattern_composition_compact_distribution = re.compile(r"[a-z]+\s*\(.+\).*\s*\+\s*([a-z]+\s*\(.+\).*\s*)+\s*$")
_re_pattern_compact_distance_strategy = re.compile(
r'^(?P<strategy_type>[a-z_]+\b)\s*\(\s*(?P<params>-?\d*\.?\d+\s*[a-z]*\b(\s*,\s*-?\d*\.?\d+\s*[a-z]*\b)*)\s*\)\s*$')
_re_pattern_compact_leader_maneuvers = re.compile(
r'^(?P<maneuvers_type>[a-z_]+\b)\s*\(\s*(?P<params>-?\d*\.?\d+\s*[a-z]*\b(\s*,\s*-?\d*\.?\d+\s*[a-z]*\b)*)\s*\)\s*$')
_re_pattern_compact_trace_leader_maneuvers = re.compile(
r'^(?P<maneuvers_type>[a-z_]+\b)\s*\(\s*(?P<params>[\w/\.-]+\s*,\s*(?:False|True)\s*)\)\s*$')
def parse_unit_measurement(config_dict):
"""
:param config_dict:
:return update the config_dict using the international system:
"""
config_dict_res = config_dict.copy()
for k, v in config_dict_res.items():
if isinstance(v, dict):
config_dict_res[k] = parse_unit_measurement(v)
elif isinstance(v, list):
config_dict_res[k] = [
parse_and_convert_value(e) if isinstance(e, str) else parse_unit_measurement(e) for e in v]
elif isinstance(v, str):
config_dict_res[k] = parse_and_convert_value(v)
else:
config_dict_res[k] = v
return config_dict_res
def parse_and_convert_value(value):
if isinstance(value, str):
# _re_pattern_composition_compact_distribution.match(value)
comp_compact_dist_res = parse_composition_compact_distribution(value)
if comp_compact_dist_res is not None:
return comp_compact_dist_res
compact_dist_res = parse_distribution_compact(value)
if compact_dist_res is not None:
return compact_dist_res
return _parse_single_unit_value(value)
return value
def _parse_single_unit_value(value_str):
"""
Convert human values in International System Units
It accept simple value, value and unit with and without space
:param value_str:
:return: SI converted unit value
"""
match = re.match(_re_pattern_value_unit, value_str)
if match:
value, unit = match.groups()
if not unit:
return float(value)
else:
return value_str
# speed units
if unit == 'kph':
return float(value) / 3.6
if unit == 'mph':
return float(value) * 0.44704
# time units
if unit == 's':
return float(value)
if unit == 'ms':
return float(value) / 1000
if unit == 'us':
return float(value) / 1000000
if unit == 'm/s':
return float(value)
if unit == 'km/h':
return float(value) / 3.6
# length unit
if unit == 'm':
return float(value)
if unit == 'mm':
return float(value) / (10 ** 3)
if unit == 'cm':
return float(value) / (10 ** 2)
if unit == 'dm':
return float(value) / 10
if unit == 'km':
return float(value) * 1000
if unit == '%':
return float(value) / 100
raise Exception("{} contains unknown unit {} {}".format(value_str, value, unit))
def _parse_distribution_compact_format(distribution_match):
grp_dict = distribution_match.groupdict()
family = grp_dict['family']
args = [_parse_single_unit_value(value.strip()) for value in grp_dict['args'].split(',')]
sample_duration = _parse_single_unit_value(grp_dict['sample_duration']) if grp_dict['sample_duration'] else None
return expand_compact_distribution_format(family, args, sample_duration)
def parse_distribution_compact(value, raise_if_error=False):
"""
Return a tuple (<parsed value>, <exception>)
if raise_if_error is true it raises and exception
"""
distribution_match = _re_pattern_compact_distribution.match(value)
if distribution_match:
return _parse_distribution_compact_format(distribution_match) # match , no error
def parse_composition_compact_distribution(value):
composition_compact_distributions_match = _re_pattern_composition_compact_distribution.match(value)
if composition_compact_distributions_match:
return [parse_distribution_compact(component.strip(), raise_if_error=True)[0] for component in
value.split('+')] # match , no error
# def _split_and_convert_param(param_str):
# p_name, p_value = param_str.split('=')
# return p_name, float(_parse_single_unit_value(p_value))
class DistributionParserAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
# parse the compact version of distribution
values = parse_and_convert_value(values)
setattr(namespace, self.dest, values)
def _expand_constant(value):
return {'family': 'constant',
'parameters': {'value': value}}
def _expand_uniform(min_value, max_value):
return {'family': 'uniform',
'parameters': {'min_value': min_value,
'max_value': max_value}}
def _expand_normal(mu, sigma): # TODO min/max values <optional>
return {'family': 'normal',
'parameters': {'mu': mu,
'sigma': sigma}}
def _expand_exponential(rate, min_value=0):
return {'family': 'exponential',
'parameters': {'rate': rate,
'min_value': min_value}}
def _expand_lognormal(mu, sigma, min_value=0):
return {'family': 'lognormal',
'parameters': {'mu': mu,
'sigma': sigma,
'min_value': min_value}}
def _expand_erlang(k, u, min_value=0):
return {'family': 'erlang',
'parameters': {'k': k,
'u': u,
'min_value': min_value}}
def _expand_hypoexponential(*rates):
return {'family': 'hypoexponential',
'parameters': {'rates': rates}}
# family : (expansion function, function arity)
_distribution_dict = {'constant': (_expand_constant, [1]),
'normal': (_expand_normal, [2]),
'uniform': (_expand_uniform, [2]),
'exponential': (_expand_exponential, [1, 2]),
'lognormal': (_expand_lognormal, [2, 3]),
'erlang': (_expand_erlang, [2, 3]),
'hypoexponential': (_expand_hypoexponential, [i for i in range(2)])
# TODO change here, refactor with regex
}
class ProbabilityDistributionNotImplemented(Exception):
...
class ProbabilityDistributionWrongArity(Exception):
...
def expand_compact_distribution_format(family, args, sample_duration):
if family not in _distribution_dict:
raise ProbabilityDistributionNotImplemented(
f"{family} is not implemented. Use the families {list(_distribution_dict.keys())}")
expansion_func, num_args_options = _distribution_dict[family]
if len(args) not in num_args_options:
raise ProbabilityDistributionWrongArity(
f"{family} takes {num_args_options} argument/s, {len(args)} have provided instead.")
expanded_distribution_config = expansion_func(*args)
if sample_duration:
expanded_distribution_config['sample_duration'] = sample_duration
return expanded_distribution_config
if __name__ == '__main__':
delay = parse_unit_measurement(
{'delay.backhaul.uplink_extra_delay': 'exponential(10ms, 10ms) H 10ms'})
print(delay)
| connets/tod-carla | src/args_parse/parser_utils.py | parser_utils.py | py | 7,994 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "re.compile",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 15,
... |
35198885002 | # -*- coding: utf-8 -*-
"""
Test to check api calls in in varonis assignment
"""
import json
import pytest
import requests
URL = "http://localhost:8000"
data = {
"data": [{"key": "key1", "val": "val1", "valType": "str"}]
}
credentials_data = {
"username": "test",
"password": "1234"
}
wrong_data = {
"username": "wrong",
"password": "1111"
}
def get_server_token(log_data):
"""
This function gets the authorization token to the server
Arg:
(dict) log_data: to connect to the server
Returns:
(str)token: the token, empty otherwise
"""
response = requests.post(f"{URL}/api/auth/", json=log_data)
if response.status_code == 200:
token = response.json()["access_token"]
print("Authenticated successfully! token: " + token)
return token
else:
print("Error! Response code: " + response.status_code)
return None
def get_objects_ids(headers):
"""
This function gets objects from the server
Returns:
(list) objects
"""
object_ids = []
res = requests.get(f"{URL}/api/poly/", headers=headers)
poly_objects = json.loads(res.text)
# Retrieving all objects
for poly in poly_objects:
object_ids.append(poly['object_id'])
return object_ids
def create_obj(headers):
"""
This function creates an object
Returns:
(obj) poly_object
"""
# Call to server
res = requests.post(f"{URL}/api/poly/", json=data, headers=headers)
print(json.dumps(res.json(), indent=4, default=str))
# Object loaded
poly_object = json.loads(res.text)
return poly_object
def delete_obj(object_id, headers):
"""
This function deletes an object
Args:
object_id (int): the object we delete
headers(dict): to connect to the server
Returns:
(int) response code
"""
res = requests.delete(f"{URL}/api/poly/{object_id}", headers=headers)
# Return answer
return res.status_code
def test_wrong_auth():
"""Test to check connection with wrong credentials
:return: None
"""
# Get access token
access_token = get_server_token(wrong_data)
assert access_token == "", "Could connect to the server with wrong credentials"
def test_auth():
"""Test to verify we can access server with authentication
:return: None
"""
# Get access token
access_token = get_server_token(credentials_data)
assert access_token != "", "Failure in server authentication!"
| eldar101/EldarRep | Python/Varonis/api_assignment/test_api.py | test_api.py | py | 2,627 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.post",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_numbe... |
2769961219 | from weixin_api_two.api.contact.get_wework_token import WeworkToken
from weixin_api_two.uitls.get_data import GetData
import loguru
class Tag(WeworkToken):
def __init__(self):
self.baseurl=GetData()
self.log= loguru.logger
self.tagurl=self.baseurl.get_UrlData('url','tag')
self.addurl=self.baseurl.get_UrlData('action','tag','add')
self.searchurl=self.baseurl.get_UrlData('action','tag','search')
self.deleteurl=self.baseurl.get_UrlData('action','tag','delete')
def search(self):
data = {
# "url": "https://qyapi.weixin.qq.com/cgi-bin/externalcontact/get_corp_tag_list",
"url": self.tagurl + self.searchurl,
"method": "post",
"params": {"access_token": self.token},
"json": {}
}
return self.request(data)
def add(self, tag_list, group_name):
data = {
# "url": "https://qyapi.weixin.qq.com/cgi-bin/externalcontact/add_corp_tag",
"url": self.tagurl+self.addurl,
"method": "post",
"params": {"access_token": self.token},
"json": {
"group_name": group_name,
"tag": [{
"name": tag_list,
}],
}
}
return self.request(data)
def delete(self, group_id: list = None, tag_id: list = None):
data = {
# "url": "https://qyapi.weixin.qq.com/cgi-bin/externalcontact/del_corp_tag",
"url": self.tagurl + self.deleteurl,
"method": "post",
"params": {"access_token": self.token},
"json": {
"group_id": group_id,
"tag_id": tag_id
}
}
return self.request(data)
def delete_list(self, tag_id_list):
data = {
# "url": "https://qyapi.weixin.qq.com/cgi-bin/externalcontact/del_corp_tag",
"url": self.tagurl + self.deleteurl,
"method": "post",
"params": {"access_token": self.token},
"json": {
"tag_id": tag_id_list
}
}
return self.request(data)
def clear(self):
r = self.search()
tag_id_list = [tag['id'] for group in r.json()['tag_group'] for tag in group['tag']]
r = self.delete_list(tag_id_list)
return r
| liwanli123/HogwartProjectPractice | weixin_api_two/api/externalcontact/tag_api.py | tag_api.py | py | 2,429 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "weixin_api_two.api.contact.get_wework_token.WeworkToken",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "weixin_api_two.uitls.get_data.GetData",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "loguru.logger",
"line_number": 10,
"usage_typ... |
73520719463 | from Functions import Functions
from log import Log
from webdriver import WebDriver
import PySimpleGUI as sg
import json
class Main:
def __init__(self):
self.ind = 0 # Indice utilizado para acessar o json.
self.log = Log() # Log de registro.
self.url = Functions().Json('Collector', 'url') # Url da plataforma.
self.webdriver = WebDriver().getDriver() # Options webdriver.
self.webdriver.get(self.url) # Get na url.
self.Json_File = Functions().Json() # Recebe o Steps_json.
if self.Json_File['Steps']: # Verificação para saber se o Steps_json está vazio .
SeleniumActions().LoopDriver(self.log, self.webdriver)
def open_window(self):
sg.theme('Dark Blue 2')
layout = [[sg.Text('Action '),
sg.Combo(['Digitar', 'Enter', 'Click', 'Click_js', 'Double_Click', 'Iframe', 'IframeOFF', 'Link',
'Attribute_ID', 'set_class', 'Alert', 'New_Pag', 'Close', 'Refresh', 'Clear'],
key='Action'),
sg.Checkbox('Search_element', default=True, key='Search')],
[sg.Text('Find '),
sg.Combo(['CSS_SELECTOR', 'ID', 'XPATH', 'Attribute_ID', 'Seletor_Js'],
key='Find'),
sg.Text('Sleep '), sg.Spin(['0', '1', '2', '3', '4', '5'], key='Sleep')],
[sg.Text('Element'), sg.InputText(key='Element')],
[sg.Text('Value '), sg.InputText(key='Value')],
[sg.Button('Save'), sg.Button('Delete')],
[sg.Output(size=(50, 7), key='-OUT-')]]
window = sg.Window('Debug Selenium', layout)
while True:
self.event, values = window.read()
if self.event == sg.WIN_CLOSED:
break
self.dic_steps = {
"Search": f"{str(values['Search'])}",
"Action": f"{values['Action']}",
"Sleep": f"{values['Sleep']}",
"Find": f"{values['Find']}",
"Element": f"{values['Element']}",
"Value": f"{values['Value']}"
}
if self.event == 'Delete':
self.Json_File['Steps'].pop(-1)
self.ind -= 1
with open('Steps.json', 'w') as json_Steps:
json.dump(self.Json_File, json_Steps, indent=4)
print('The last action successfully deleted!!')
if self.event == 'Save':
self.Json_File['Steps'].append(self.dic_steps)
with open('Steps.json', 'w') as json_Steps:
json.dump(self.Json_File, json_Steps, indent=4)
SeleniumActions().Driver(self.webdriver, self.log, self.ind)
self.ind += 1
self.event = ''
class SeleniumActions:
def __init__(self):
self.Json_Steps = Functions().Json('Steps')
def Driver(self, webdriver, log, ind):
log.debug(f"")
log.debug(f"--- Started Step {ind + 1}/{len(self.Json_Steps)} ---")
print()
print(f" --- Started Step {ind + 1}.. ---")
if self.Json_Steps[ind]['Search'] == 'True':
element = Functions().Element(webdriver, self.Json_Steps[ind]['Find'], self.Json_Steps[ind]['Element'])
Functions().Actions(self.Json_Steps[ind]['Action'], self.Json_Steps[ind]['Value'],
webdriver, self.Json_Steps[ind]['Sleep'], element)
def LoopDriver(self, log, webdriver):
for ind, json_steps in enumerate(self.Json_Steps):
log.debug(f"")
log.debug(f"--- Started Step {ind + 1}/{len(self.Json_Steps)} ---")
print()
print(f" --- Started Step {ind + 1}/{len(self.Json_Steps)} ---")
if json_steps['Search'] == 'True':
element = Functions().Element(webdriver, json_steps['Find'], json_steps['Element'])
Functions().Actions(json_steps['Action'], json_steps['Value'], webdriver,
json_steps['Sleep'], element)
if __name__ == "__main__":
Debug = Functions().Json('Collector', 'debug')
if Debug == 'True':
Main().open_window()
elif Debug == 'False':
Main()
| LucasAmorimDC/SeleniumCollector | Selenium_Collector/Main.py | Main.py | py | 4,318 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "log.Log",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "Functions.Functions",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "webdriver.WebDriver",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "Functions.Functions",... |
38440361902 | import requests
from decouple import config
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
@login_required(login_url='/accounts/login/')
def home(request):
disk = requests.get(config("API") + 'disk_space._')
load = requests.get(config("API") + 'system.load')
ram = requests.get(config("API") + 'system.ram')
net = requests.get(config("API") + 'net.eth0')
context = {
"disk_label": ['Disponível', 'Usado', 'Reservado para o root'],
"disk_data": disk.json()['data'][0][1:],
"load_labels": ['1 minuto', '5 minutos', '15 minutos'],
"load_data": load.json()['data'][0][1:],
"ram_labels": ['Mem. livre', 'Mem. usada', 'Mem. cacheada', 'Buffers'],
"ram_data": ram.json()['data'][0][1:],
"recebido": net.json()['data'][0][1],
"enviado": net.json()['data'][0][2] * -1,
}
return render(request, 'index.html', context)
| carlos-moreno/dashboard | dashboard/core/views.py | views.py | py | 950 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "decouple.config",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "decouple.config",
"line_n... |
28984839741 | #! /usr/bin/env python
import argparse
import re
import numpy as np
def to_binary_string(value):
"""
Converts F or L to zeros, and B or R to 1 and interprets the string as a binary value
>>> to_binary_string("FBFBBFF")
('0101100', 44)
>>> to_binary_string("RLR")
('101', 5)
:param value:
:return:
"""
value = re.sub(r"[FL]", "0", value)
value = re.sub(r"[BR]", "1", value)
return value, int(value, 2)
def get_seat_id(value):
"""
Splits the string into row and column parts and interprets each as binary locations. Then
multiplies the row by 8 and adds the column.
>>> get_seat_id("FBFBBFFRLR")
357
>>> get_seat_id("BFFFBBFRRR")
567
>>> get_seat_id("FFFBBBFRRR")
119
>>> get_seat_id("BBFFBBFRLL")
820
:param value:
:return:
"""
row = to_binary_string(value[:7])
col = to_binary_string(value[7:])
return row[1]*8 + col[1]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Day 5 of Advent of Code 2020')
parser.add_argument('file', metavar='filename', type=argparse.FileType('rt'),
help='filename to your personal inputs')
parser.add_argument('--test', '-t', action='store_true')
args = parser.parse_args()
if args.test:
import doctest
doctest.testmod()
print("Tests completed")
exit(0)
with args.file as FILE:
file_content = FILE.readlines()
seat_ids = [get_seat_id(line) for line in file_content]
print(f"There are {len(seat_ids)} boardings cards in the the input, and the highest value is {np.max(seat_ids)}")
for v in range(0, np.max(seat_ids)):
if v not in seat_ids and v-1 in seat_ids and v+1 in seat_ids:
print(f"The value {v} is not in the list, but {v-1} and {v+1} are")
| SocialFinanceDigitalLabs/AdventOfCode | solutions/2020/kws/day_05.py | day_05.py | py | 1,856 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "re.sub",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "argparse.FileType",
"line_n... |
12514324819 | import torch
from torchtext.datasets import AG_NEWS
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator
from torch.utils.data import DataLoader
UNK = '<unk>'
tok = get_tokenizer('basic_english')
train_iter = AG_NEWS(split='train')
def yield_tokens(data_iter, tokenizer):
for _, text in data_iter:
yield tokenizer(text)
vocab = build_vocab_from_iterator(yield_tokens(train_iter, tok), specials=['<unk>'])
vocab.set_default_index(vocab['<unk>'])
# print(vocab['my'])
# print(vocab(['this', 'is', 'my', 'car']))
# print(vocab['<unk>']) #0
text_pipeline = lambda x: vocab(tok(x))
label_pipeline = lambda x: int(x) - 1
# print(text_pipeline('this is my car'))
# print(vocab(['this is my car']))
# print(tok('this is my car'))
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def collate_batch(batch):
label_list, text_list, offsets = [], [], [0]
for (_label, _text) in batch:
label_list.append(label_pipeline(_label)) # 0 ~ 3
processed_text = torch.tensor(text_pipeline(_text), dtype=torch.int64) # int list to tensor
text_list.append(processed_text) # tensor of int-list-tensor
offsets.append(processed_text.size(0))
label_list = torch.tensor(label_list, dtype=torch.int64)
offsets = torch.tensor(offsets[:-1]).cumsum(dim=0)
text_list = torch.cat(text_list)
return label_list.to(device), text_list.to(device), offsets.to(device)
dataloader = DataLoader(train_iter, batch_size=8, shuffle=False, collate_fn=collate_batch)
collate_batch(train_iter) | moon0331/TorchTutorial | seq2seq.py | seq2seq.py | py | 1,590 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torchtext.data.utils.get_tokenizer",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torchtext.datasets.AG_NEWS",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torchtext.vocab.build_vocab_from_iterator",
"line_number": 16,
"usage_type":... |
2346859029 | import numpy as np
from PIL import Image
# Goal: convert an image file from normal pixels to ANSI art made of dots "."
# of same color with canvas-like color background
# ANSI foreground color (n, 0-255) based on 256-bit -> \033[38;5;nm
# ANSI background color (n, 0-255) based on 256-bit -> \033[48;5;nm
# end with \033[m
# colors: https://en.wikipedia.org/wiki/ANSI_escape_code#8-bit
# example: print("\033[48;5;19m\033[38;5;214mHello world!\033[m")
# Open the image
def open_image_array(img_dir="razorback.png"):
# img_dir = input("Name your picture's filename: ") # input image path
img_rgb = Image.open(img_dir).convert('RGB') # convert from RGBA to RGB
# img_rgb.show()
# convert image into 3D array of 3 8-bit RGB values for each pixel
rgb_array = np.array(img_rgb, dtype=np.uint8)
size_d = list(rgb_array.shape) # pic dims [y, x, 3]
size_d[2] = -1 # change 3 -> -1
# convert 3D 8-bit color array to 2D int32 color array (each pixel has 1 ANSI color value)
colorint32 = np.dstack((rgb_array, np.zeros(rgb_array.shape[:2], 'uint8'))).view('uint32').squeeze(-1)
ansi_array = np.floor(colorint32**(1/3)) # cube root & round down to get 256 ANSI color codes
# convert 2d int32 array back to 3D 8-bit array, if needed
rgb_convert = colorint32.view('uint8').reshape(size_d)[:,:,:3]
# ANSI array of colored dots based on ansi_colors array
# BG = 230 # off-white background canvas color ANSI code
ansi_list = ansi_array.astype('uint8').tolist() # convert array to list of lists
for lst in ansi_list:
dot_list = ['\033[48;5;230m'] # BG color
for val in lst:
dot = '\033[38;5;' + str(val) + 'm.' # add FG color values
dot_list.append(dot)
dot_list.append('\033[m')
row = ''.join(dot_list)
print(row)
# Image.fromarray(canvas_array).show()
# print("\033[48;5;230m\033[38;5;45m.\033[38;5;7m.\033[m")
# print(len(canvas_list))
# print(rgb_array)
# print(size_d)
# print(colorint32)
# print(canvas_array)
# print(canvas_list)
if __name__ == "__main__":
open_image_array() | jakecharris/pointillism | source.py | source.py | py | 2,172 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PIL.Image.open",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number":... |
15771336312 | import ctypes
import typing as t
from . import sdk
from .enum import Result
from .event import bind_events
from .exception import get_exception
from .model import UserAchievement
class AchievementManager:
    """Python wrapper around the Discord SDK's IDiscordAchievementManager.

    ctypes callback objects handed to the native SDK are parked in
    ``_garbage`` until they fire, so the function pointers are not
    garbage-collected while the native side still holds them.
    """

    _internal: sdk.IDiscordAchievementManager = None
    _garbage: t.List[t.Any]          # keeps pending ctypes callbacks alive
    _events: sdk.IDiscordAchievementEvents

    def __init__(self):
        self._garbage = []
        self._events = bind_events(
            sdk.IDiscordAchievementEvents,
            self._on_user_achievement_update
        )

    def _on_user_achievement_update(self, event_data, user_achievement):
        # Copy the native struct before handing it to user code, so the
        # object stays valid after the SDK frees its buffer.
        self.on_user_achievement_update(UserAchievement(copy=user_achievement.contents))

    def set_user_achievement(
        self,
        achievement_id: int,
        percent_complete: int,
        callback: t.Callable[[Result], None]
    ) -> None:
        """
        Updates the current user's status for a given achievement.

        Returns discordsdk.enum.Result via callback.
        """
        def c_callback(callback_data, result):
            self._garbage.remove(c_callback)
            result = Result(result)
            callback(result)

        c_callback = self._internal.set_user_achievement.argtypes[-1](c_callback)
        self._garbage.append(c_callback)  # prevent it from being garbage collected

        self._internal.set_user_achievement(
            self._internal,
            achievement_id,
            percent_complete,
            ctypes.c_void_p(),
            c_callback
        )

    def fetch_user_achievements(self, callback: t.Callable[[Result], None]) -> None:
        """
        Loads a stable list of the current user's achievements to iterate over.

        Returns discordsdk.enum.Result via callback.
        """
        def c_callback(callback_data, result):
            self._garbage.remove(c_callback)
            result = Result(result)
            callback(result)

        c_callback = self._internal.fetch_user_achievements.argtypes[-1](c_callback)
        self._garbage.append(c_callback)  # prevent it from being garbage collected

        self._internal.fetch_user_achievements(self._internal, ctypes.c_void_p(), c_callback)

    def count_user_achievements(self) -> int:
        """
        Counts the list of a user's achievements for iteration.
        """
        count = ctypes.c_int32()
        self._internal.count_user_achievements(self._internal, count)
        return count.value

    def get_user_achievement_at(self, index: int) -> UserAchievement:
        """
        Gets the user's achievement at a given index of their list of achievements.

        Raises the exception matching the SDK result on failure.
        """
        achievement = sdk.DiscordUserAchievement()

        result = Result(self._internal.get_user_achievement_at(
            self._internal,
            index,
            achievement
        ))
        if result != Result.ok:
            raise get_exception(result)

        return UserAchievement(internal=achievement)

    # BUG FIX: the return annotation said `-> None` although the method
    # returns a UserAchievement (as the body and docstring show).
    def get_user_achievement(self, achievement_id: int) -> UserAchievement:
        """
        Gets the user achievement for the given achievement id.

        Raises the exception matching the SDK result on failure.
        """
        achievement = sdk.DiscordUserAchievement()

        result = Result(self._internal.get_user_achievement(
            self._internal,
            achievement_id,
            achievement
        ))
        if result != Result.ok:
            raise get_exception(result)

        return UserAchievement(internal=achievement)

    def on_user_achievement_update(self, achievement: UserAchievement) -> None:
        """
        Fires when an achievement is updated for the currently connected user
        """
| Maselkov/GW2RPC | gw2rpc/lib/discordsdk/achievement.py | achievement.py | py | 3,610 | python | en | code | 47 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "typing.Any",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "event.bind_events",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "model.UserAchievem... |
import os, requests

# Join the media segments referenced by an m3u8 playlist into one file.
index_file_path = input("Enter Index file path with extension m3u8 : ")
# BUG FIX: use context managers so file handles are closed even when a
# download or disk read fails part-way through.
with open(index_file_path, 'r') as index_file:
    indexes = index_file.read()

output_file_path = input("Enter output file path : ")
folder_path = input("Enter folder path with extension m3u8_contents ('#<internet>' for get file from internet) : ")

with open(output_file_path, 'wb') as output_file:
    if folder_path == '#<internet>':
        # Remote playlist: every segment URI runs from 'http' to end of line.
        indexes = indexes.split('http')[1:]
        indexes = ['http' + x.split('\n')[0] for x in indexes]
        for index in indexes:
            content = requests.get(index)
            output_file.write(content.content)
    else:
        # Local playlist: segments referenced as 'file:...'; keep basenames.
        indexes = indexes.split('file:')[1:]
        indexes = [x.split('\n')[0].split('/')[-1] for x in indexes]
        for index in indexes:
            with open(os.path.join(folder_path, index), 'rb') as content_file:
                output_file.write(content_file.read())
| nkpro2000sr/m3u8ToVideo | m3u8tovideo.py | m3u8tovideo.py | py | 952 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 37,
"usage_type": "attribute"
}
] |
import cv2

thres = 0.45  # Threshold to detect object

cap = cv2.VideoCapture(0)
### for IP CAM
# cap = cv2.VideoCapture('rtsp://admin:admin@192.168.1.108/', apiPreference=cv2.CAP_FFMPEG)
cap.set(3, 1280)  # frame width
cap.set(4, 720)   # frame height
cap.set(10, 70)   # brightness

# COCO class labels, one per line.
classNames = []
classFile = 'coco.names'
with open(classFile, 'rt') as f:
    classNames = f.read().rstrip('\n').split('\n')

configPath = 'ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt'
weightsPath = 'frozen_inference_graph.pb'

net = cv2.dnn_DetectionModel(weightsPath, configPath)
net.setInputSize(320, 320)
net.setInputScale(1.0 / 127.5)
net.setInputMean((127.5, 127.5, 127.5))
net.setInputSwapRB(True)

while True:
    success, img = cap.read()
    if not success:
        # Camera read failed -> stop instead of crashing inside detect().
        break
    classIds, confs, bbox = net.detect(img, confThreshold=thres)
    if len(classIds) != 0:
        for classId, confidence, box in zip(classIds.flatten(), confs.flatten(), bbox):
            cv2.rectangle(img, box, color=(0, 255, 0), thickness=2)
            cv2.putText(img, classNames[classId - 1].upper(), (box[0] + 10, box[1] + 30),
                        cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
            cv2.putText(img, str(round(confidence * 100, 2)), (box[0] + 200, box[1] + 30),
                        cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)

    cv2.imshow("Output", img)
    # BUG FIX: cv2.waitKey(ord('q')) waited 113 ms per frame and never
    # compared the pressed key, so the loop could not be exited.  Poll
    # every 1 ms and break when 'q' is pressed.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
{
"api_name": "cv2.VideoCapture",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2.dnn_DetectionModel",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "cv2.rectangle",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
... |
30055768931 | import argparse
import numpy as np
from Data import Data
from Experiment import Experiment
from FrameStackExperiment import FrameStackExperiment
if __name__ == '__main__':
    # Command-line interface.
    parser = argparse.ArgumentParser(description='Evaluate termination classifier performance')
    parser.add_argument('filepath', type=str, help='filepath of pkl file containing trajectories with RAM states and frames')
    parser.add_argument('dest', type=str, help='directory to write results and plots to')
    parser.add_argument('term_classifier', type=str, choices=['OneClassSVM', 'TwoClassSVM', 'FullCNN'], help='termination classifier to be used')
    parser.add_argument('feature_extractor', type=str, choices=['RawImage', 'DownsampleImage', 'RawRAM', 'MonteRAMState', 'MonteRAMXY', 'BOVW', 'RND', 'CNN'], help='feature extractor to be used')
    parser.add_argument('label_extractor', type=str, choices=['BeforeAfterExtractor', 'AfterExtractor', 'OracleExtractor', 'TransductiveExtractor', 'PositiveAugmentExtractor'], help='label extractor to be used')
    parser.add_argument('--extract_only_pos', default=False, action='store_true', help='whether label extractor should only extract positive egs')
    parser.add_argument('--frame_stack', default=False, action='store_true', help='whether states are frame stacks')
    args = parser.parse_args()

    data = Data(args.filepath, train_skip=25, train_num=75, test_skip=25, test_num=25)

    # (player_x, player_y, screen) of good subgoals.
    subgoals = [(133, 148, 1), (58, 192, 1), (35, 235, 1), (119, 235, 1), (49, 235, 1), (88, 192, 1), (142, 192, 1)]

    # Hyper-parameter grids; most axes hold a single active value.
    window_sz_hyperparms = [None] if args.label_extractor == 'OracleExtractor' else range(1, 2)
    if args.feature_extractor == 'BOVW':
        num_clusters_hyperparams = range(110, 111, 10)
        num_sift_keypoints_hyperparams = range(25, 26, 5)
    else:
        num_clusters_hyperparams = [None]
        num_sift_keypoints_hyperparams = [None]
    nu_hyperparams = np.arange(0.3, 0.5, 0.1) if args.term_classifier == 'OneClassSVM' else [None]
    gamma_hyperparams = [None] if args.term_classifier == 'FullCNN' else [0.000000004]

    # For every subgoal, locate its first occurrence in the training
    # trajectories and the ground-truth test states that match it.
    subgoals_info = {}
    for subgoal in subgoals:
        traj_idx, state_idx = data.find_first_instance(data.train_ram_trajs, subgoal)
        if traj_idx is None:
            continue
        subgoal_ram = data.train_ram_trajs[traj_idx][state_idx]
        ground_truth_idxs = data.filter_in_term_set(data.test_ram_trajs, subgoal_ram)
        subgoals_info[subgoal] = {'traj_idx': traj_idx,
                                  'state_idx': state_idx,
                                  'ground_truth_idxs': ground_truth_idxs}

    # Grid search over every hyper-parameter combination.
    for num_clusters in num_clusters_hyperparams:
        for num_sift_keypoints in num_sift_keypoints_hyperparams:
            for window_sz in window_sz_hyperparms:
                for nu in nu_hyperparams:
                    for gamma in gamma_hyperparams:
                        for _ in range(1):
                            print(f"[+] clusters={num_clusters}, kps={num_sift_keypoints}, window_sz={window_sz}, nu={nu}, gamma={gamma}")
                            if args.feature_extractor in ['RawImage', 'DownsampleImage', 'BOVW', 'RND', 'CNN'] or args.term_classifier == 'FullCNN':
                                train_trajs = data.train_frame_trajs
                                test_trajs = data.test_frame_trajs
                            elif args.feature_extractor in ['RawRAM', 'MonteRAMState', 'MonteRAMXY']:
                                train_trajs = data.train_raw_ram_trajs
                                test_trajs = data.test_raw_ram_trajs

                            # Run experiment
                            hyperparams = {
                                "num_sift_keypoints": num_sift_keypoints,
                                "num_clusters": num_clusters,
                                "window_sz": window_sz,
                                "nu": nu,
                                "gamma": gamma,
                            }
                            experiment_cls = FrameStackExperiment if args.frame_stack else Experiment
                            experiment = experiment_cls(train_trajs, data.train_raw_ram_trajs, test_trajs, data.test_raw_ram_trajs,
                                                        subgoals, subgoals_info,
                                                        args, hyperparams)
                            experiment.run()
| jwnicholas99/option-term-classifier | run.py | run.py | py | 5,905 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "Data.Data",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "FrameStackExperiment.Fr... |
import plotly.plotly as py
import plotly.graph_objs as go
from pymongo import MongoClient
import sys
import networkx as nx
import matplotlib.pyplot as plt
import matplotlib
from networkx.algorithms import approximation as approx
from nxpd import draw
from networkx.drawing.nx_agraph import graphviz_layout

# Connect to the hosted MongoDB instance.  (The original code created a
# throwaway default MongoClient() first; that dead connection is removed.)
client = MongoClient('mongodb://m49dy:admin12345@ds251799.mlab.com:51799/sna_project')
db = client['sna_project']
userCollection = db['users']
groupCollection = db['groups']
postCollection = db['posts']

array_users = list(userCollection.find())
array_groups = list(groupCollection.find())
print(array_users[0])

names = []
no_of_friends = []
no_of_posts = []
group_names = []
users_groups = []
posts = []
use = []
males = 0
females = 0
group_posts = []

# PERF FIX: fetch all posts once instead of re-querying the collection for
# every group in the loop below.
all_posts = list(postCollection.find())
print(all_posts[0]["group"])

# Per-user statistics: friend count, post count and a gender tally.
for user in array_users:
    names.append(user["name"])
    no_of_friends.append(len(user["friends"]))
    no_of_posts.append(len(user["posts"]))
    if 'gender' in user:
        if user["gender"] == 1:
            males = males + 1
        else:
            females = females + 1

# Per-group statistics: member count and number of posts in the group.
for group in array_groups:
    group_names.append(group["name"])
    users_groups.append(len(group["users"]))

for group in array_groups:
    no = 0
    for post in all_posts:
        if 'group' in post and post["group"] == group["_id"]:
            no = no + 1
    group_posts.append(no)


def save_bar_chart(x, y, title, x_title, y_title, filename):
    """Build a plotly bar chart and export it as a PNG under assets/.

    Extracted helper: the same trace/layout/save boilerplate was repeated
    five times in the original script.
    """
    axis_font = dict(family='Courier New, monospace', size=18, color='#7f7f7f')
    layout = go.Layout(title=title, width=800, height=640,
                       xaxis=dict(title=x_title, titlefont=axis_font),
                       yaxis=dict(title=y_title, titlefont=axis_font))
    fig = go.Figure(data=[go.Bar(x=x, y=y)], layout=layout)
    py.image.save_as(fig, filename=filename)


# Replace the username, and API key with your credentials.
py.sign_in('diaa56', 'QaMy3cKad5uFqnLP8oaL')

save_bar_chart(names, no_of_friends, 'no of friends for each user',
               'users', 'no of friends', 'assets/a-simple-plot.png')
save_bar_chart(names, no_of_posts, 'no of posts for each user',
               'users', 'no of posts', 'assets/posts.png')
save_bar_chart(group_names, users_groups, 'no of users for each group',
               'groups', 'no of users', 'assets/groups.png')
save_bar_chart(group_names, group_posts, 'no of posts for each group',
               'groups', 'no of posts', 'assets/groups_posts.png')

if len(sys.argv) > 1:
    target = sys.argv[1]
    print(sys.argv[1])
    gp = groupCollection.find_one({"name": target})
    print(gp)

    # Friendship graph between the members of the requested group.
    G = nx.Graph()
    for user in gp["users"]:
        G.add_node(userCollection.find_one({"_id": user})["name"])
    for user in gp["users"]:
        data = userCollection.find_one({"_id": user})
        if 'friends' in data:
            for one in data["friends"]:
                if one in gp["users"]:
                    G.add_edge(data["name"], userCollection.find_one({"_id": one})["name"], color='grey')

    edges = G.edges()
    colors = [G[u][v]['color'] for u, v in edges]
    pos = nx.nx_pydot.graphviz_layout(G)
    nx.draw(G, pos, with_labels=True, font_weight='bold', node_color='red',
            font_size=18, node_size=0, edge_color=colors, width=5)
    fig = matplotlib.pyplot.gcf()
    fig.set_size_inches(20, 15)
    plt.savefig("assets/connect.png")

    if gp:
        print("lol")
        if 'users' in gp:
            # Count, per member, how many of their posts belong to the group.
            for user in gp["users"]:
                no = 0
                us = userCollection.find_one({"_id": user})
                use.append(us["name"])
                for post in us["posts"]:
                    ps = postCollection.find_one({"_id": post})
                    if ps and 'group' in ps and ps["group"] == gp["_id"]:
                        no = no + 1
                posts.append(no)
        save_bar_chart(use, posts, 'no of posts for each user in group ' + target,
                       'users', 'no of posts', 'assets/habd.png')
| diaaahmed850/snaproject | diaa.py | diaa.py | py | 5,831 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pymongo.MongoClient",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "plotly.plotly.sign_in",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "plotly.... |
24486912101 | """ Clean up of hail endpoint column in three steps:
- remove unused staging column
- remove now obsolete testing column
- make hail_endpoint_production non null
Revision ID: aa6d3d875f28
Revises: 8bd62cba881a
Create Date: 2020-11-17 09:28:10.910999
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'aa6d3d875f28'
down_revision = '8bd62cba881a'
branch_labels = None
depends_on = None
def upgrade():
    """Drop the staging/testing endpoint columns and make the production
    endpoint mandatory (backfilling NULLs with '' first)."""
    for obsolete in ('hail_endpoint_staging', 'hail_endpoint_testing'):
        op.drop_column('user', obsolete)
    # NULLs must be cleared before the NOT NULL constraint can be applied.
    op.execute(sa.text('''update "user" set hail_endpoint_production = '' where hail_endpoint_production is null'''))
    op.alter_column('user', 'hail_endpoint_production',
                    existing_type=sa.VARCHAR(),
                    server_default='',
                    nullable=False)
def downgrade():
    """Re-create the dropped columns and make the production endpoint
    nullable again (the dropped columns' data is not recovered)."""
    op.alter_column('user', 'hail_endpoint_production',
                    existing_type=sa.VARCHAR(),
                    nullable=True)
    for restored in ('hail_endpoint_testing', 'hail_endpoint_staging'):
        op.add_column('user', sa.Column(restored, sa.VARCHAR(), autoincrement=False, nullable=True))
| openmaraude/APITaxi | APITaxi_models2/migrations/versions/20201117_09:28:10_aa6d3d875f28_clean_hail_endpoints.py | 20201117_09:28:10_aa6d3d875f28_clean_hail_endpoints.py | py | 1,214 | python | en | code | 24 | github-code | 36 | [
{
"api_name": "alembic.op.drop_column",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "alembic.op.drop_column",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "alembic.op",... |
15098306537 | #
#
#
import pandas as pd
import geopandas as gpd
from zipfile import ZipFile
from pathlib import Path
import sys,time
def Read_Glob_Bldg(country_geojson):
    """Load a Global-ML building-footprint GeoJSON, with a pickle cache.

    The first call parses the (large) GeoJSON with geopandas and stores a
    compressed pickle under ./PICKLE next to it; later calls read that
    pickle instead.
    """
    geojson_path = Path('/home/phisan/GeoData/GlobML_BldgFP').joinpath(country_geojson)
    cache_path = geojson_path.parents[0].joinpath('./PICKLE', geojson_path.stem + '.bz2')
    if not cache_path.is_file():
        print(f'Reading "{geojson_path}" might take time ...')
        frame = gpd.read_file(geojson_path)
        print(f'Writing cache "{cache_path}"...')
        frame.to_pickle(cache_path, compression='infer')
        return frame
    print(f'Reading cached "{cache_path}"...')
    return pd.read_pickle(cache_path)
def Read_GADM(SYMB):
    """Read the GADM level-1 administrative boundaries for one country.

    SYMB = THA | LAO
    """
    shp_path = f'/home/phisan/GeoData/GADM/{SYMB}/gadm40_{SYMB}_1.shp'
    return gpd.read_file(shp_path)
#import pdb; pdb.set_trace()
def MakeCentroid(dfBLDG, SYMB):
    """Return a frame of building centroids for SYMB, cached as a pickle."""
    cache_file = Path(f'CACHE/dfCENTR_{SYMB}.bz2')
    if cache_file.is_file():
        print(f'Reading cached "{cache_file}"...')
        return pd.read_pickle(cache_file)
    print(f'Caculate centroid ...')
    centroids = dfBLDG[['geometry']].copy()
    centroids['geometry'] = centroids['geometry'].centroid
    print(f'Writing "{cache_file}" ...')
    centroids.to_pickle(cache_file, compression='infer')
    return centroids
#################################################################
#################################################################
#################################################################
# Province index range to process, e.g. "python BreakProv_Bldg.py 0 10".
FR, TO = int(sys.argv[1]), int(sys.argv[2])
COUNTRY = 'Thailand.geojsonl', 'THA'
#COUNTRY = 'Laos.geojsonl', 'LAO'

dfADM = Read_GADM(COUNTRY[1])
# Sanity pass: confirm every requested province index resolves to a name
# before the expensive building data is loaded.
for i in range(FR, TO):
    PROV = dfADM.iloc[i:i + 1]
    print(f'Check processing {i} {PROV.iloc[0].NAME_1} ok...')

dfBLDG = Read_Glob_Bldg(COUNTRY[0])
dfCENTR = MakeCentroid(dfBLDG, COUNTRY[1])

for i in range(FR, TO):
    print('===========================================')
    BEG = time.time()
    PROV = dfADM.iloc[i:i + 1]
    print(f'Processing {i} {PROV.iloc[0].NAME_1} ')
    PROV_NAME = PROV.iloc[0].NAME_1
    # Cheap bounding-box pre-filter before the exact spatial join.
    xmin, ymin, xmax, ymax = PROV.total_bounds
    dfCENTR_ = dfCENTR.cx[xmin:xmax, ymin:ymax].copy()
    df_bldg_prov = gpd.sjoin(dfCENTR_, PROV, how='inner', predicate='intersects')
    if len(df_bldg_prov) > 0:
        # Write centroids and the matching footprint polygons as two layers
        # of the same GeoPackage.
        gpkg = f'CACHE/{i}_{PROV_NAME}.gpkg'
        df_bldg_prov.to_file(gpkg, driver='GPKG', layer='Bldg_Centroid')
        dfBLDG.loc[df_bldg_prov.index].to_file(gpkg, driver='GPKG', layer='Bldg_Polygon')
    SUM_PROV = len(df_bldg_prov)
    print(f'Buildings in province : {SUM_PROV:,} ...')
    END = time.time()
    ELAP = END - BEG
    print(f'{ELAP:,.0f} sec')

print('=================== Finish ====================')
| phisan-chula/Thai_Bldg_Model | BreakProv_Bldg.py | BreakProv_Bldg.py | py | 2,944 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pandas.read_pickle",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "geopandas.read_file",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "geopandas.read_fi... |
73456512104 | # -*- coding: utf-8 -*-
# Time : 2023/10/5 22:53
# Author : QIN2DIM
# GitHub : https://github.com/QIN2DIM
# Description:
import csv
from dataclasses import dataclass, field
from pathlib import Path
from typing import List
class Level:
    """Prize tiers of the SSQ (双色球) lottery; 0 means no prize."""

    first = 1
    second = 2
    third = 3
    fourth = 4
    fifth = 5
    sixth = 6
    none = 0

    # level -> fixed bonus in yuan; -1 marks pool-dependent (floating) prizes
    _BONUS = {0: 0, 6: 5, 5: 10, 4: 200, 3: 3000, 2: -1, 1: -1}
    # level -> human-readable Chinese label
    _ZH = {0: "无", 6: "六等奖", 5: "五等奖", 4: "四等奖", 3: "三等奖", 2: "二等奖", 1: "一等奖"}

    @staticmethod
    def get_bonus(level: int):
        return Level._BONUS[level]

    @staticmethod
    def get_zh_level(level: int):
        return Level._ZH[level]


def is_bingo(red: int, blue: int):
    """Map (matched red count, matched blue flag) to a prize level."""
    assert 0 <= red <= 6
    assert 0 <= blue <= 1
    if blue == 0:
        table = {0: Level.none, 1: Level.none, 2: Level.none, 3: Level.none,
                 4: Level.fifth, 5: Level.fourth, 6: Level.second}
    else:
        table = {0: Level.sixth, 1: Level.sixth, 2: Level.sixth,
                 3: Level.fifth, 4: Level.fourth, 5: Level.third, 6: Level.first}
    return table[red]


def compare_nums(mc: List[str], bingo_nums: List[str]):
    """Count matched red balls (all but the last entry) and the blue ball
    (last entry) of a pick ``mc`` against the winning ``bingo_nums``."""
    winning_reds = bingo_nums[:-1]
    red = sum(1 for num_red in mc[:-1] if num_red in winning_reds)
    blue = 1 if mc[-1] == bingo_nums[-1] else 0
    return red, blue
@dataclass
class SSQResult:
    """Outcome of checking one SSQ pick against one draw.

    BUG FIX: the fields previously used ``field(default=int)`` and
    ``field(default=str)``, which made the *type objects* ``int``/``str``
    the default values.  Sensible zero/empty defaults are used instead.
    """

    red: int = 0        # number of matched red balls (0..6)
    blue: int = 0       # 1 if the blue ball matched, else 0
    level: int = 0      # prize level (see Level); 0 = no prize
    bonus: int = 0      # fixed bonus in yuan; -1 = pool-dependent
    zh_level: str = ""  # Chinese label of the prize level
    term: str = ""      # draw/term identifier (set when tracing history)
class SSQNumsChecker:
    """Checks a batch of the user's SSQ picks against winning numbers."""

    def __init__(self, my_nums: List[List[str]]):
        # Each pick: six red numbers followed by one blue number.
        self.my_nums = my_nums

    def get_results(self, bingo_nums: List[str]):
        """Yield (pick, SSQResult) for every pick against one draw."""
        for pick in self.my_nums:
            red, blue = compare_nums(pick, bingo_nums)
            level = is_bingo(red, blue)
            yield pick, SSQResult(
                red=red,
                blue=blue,
                level=level,
                bonus=Level.get_bonus(level),
                zh_level=Level.get_zh_level(level),
            )

    def trace_results(self, cache_path: Path):
        """Yield (pick, SSQResult) for every pick against every cached draw.

        ``cache_path`` is a CSV file whose first line is a header and whose
        data rows are ``term, red1..red6, blue``.
        """
        text = cache_path.read_text(encoding="utf8")
        rows = csv.reader([line for line in text.split("\n")[1:] if line])
        for row in rows:
            term, bingo_nums = row[0], row[1:]
            for pick in self.my_nums:
                red, blue = compare_nums(pick, bingo_nums)
                level = is_bingo(red, blue)
                yield pick, SSQResult(
                    red=red,
                    blue=blue,
                    level=level,
                    bonus=Level.get_bonus(level),
                    zh_level=Level.get_zh_level(level),
                    term=term,
                )
| QIN2DIM/hysterical_ticket | hysterical_ticket/component/bingo_ssq.py | bingo_ssq.py | py | 3,044 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "dataclasses.field",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "dataclasses.field",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "dataclasses.field",
... |
5511991110 |
import os
from re import M, search
from unicodedata import category
import requests
import json
import psycopg2
import itertools
from flask import Flask, render_template, request, flash, redirect, session, g, jsonify,url_for,abort
from sqlalchemy.exc import IntegrityError
from forms import LoginForm, UserAddForm, PasswordResetForm
from models import db, connect_db, User,Addproduct, FavoriteProduct
from helpers import get_products_from_api_response
CURR_USER_KEY = "curr_user"
app = Flask(__name__)
app.jinja_env.filters['zip'] = zip
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL', 'postgresql:///eCommerce')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_ECHO'] = False
app.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False
app.config['SECRET_KEY'] = os.environ.get('SECRET_KEY', "it's a secret")
API_BASE_URL = "https://fakestoreapi.com"
connect_db(app)
@app.before_request
def add_user_to_g():
    """If we're logged in, add curr user to Flask global."""
    g.user = User.query.get(session[CURR_USER_KEY]) if CURR_USER_KEY in session else None


def do_login(user):
    """Store the user's id in the session so later requests see them."""
    session[CURR_USER_KEY] = user.id


def do_logout():
    """Drop the logged-in user's id from the session, if present."""
    session.pop(CURR_USER_KEY, None)
@app.route('/signup', methods=["GET", "POST"])
def signup():
    """Handle user signup.

    Create new user and add to DB. Redirect to home page.

    If form not valid, present form.

    If the there already is a user with that username: flash message
    and re-present form.
    """
    # A visitor signing up should never keep a stale login around.
    if CURR_USER_KEY in session:
        del session[CURR_USER_KEY]
    form = UserAddForm()

    if form.validate_on_submit():
        try:
            user = User.signup(
                username=form.username.data,
                password=form.password.data,
                email=form.email.data,
            )
            # NOTE(review): schema creation belongs in app start-up, not in
            # a request handler; kept here to preserve behaviour.
            db.create_all()
            db.session.commit()
        except IntegrityError:
            # BUG FIX: roll back the failed transaction, otherwise the
            # session is left in an aborted state for later requests.
            db.session.rollback()
            flash("Username already taken", 'danger')
            return render_template('users/signup.html', form=form)

        do_login(user)
        return redirect("/")

    else:
        return render_template('users/signup.html', form=form)
@app.route('/login', methods=["GET", "POST"])
def login():
    """Handle user login."""
    form = LoginForm()

    if not form.validate_on_submit():
        # GET request or invalid form: just show the login page.
        return render_template('users/login.html', form=form)

    user = User.authenticate(form.username.data, form.password.data)
    if user:
        do_login(user)
        flash(f"Hello, {user.username}!", "success")
        return redirect("/")

    flash("Invalid credentials.", 'danger')
    return render_template('users/login.html', form=form)


@app.route('/logout')
def logout():
    """Handle logout of user."""
    do_logout()
    flash("You have successfully logged out.", 'success')
    return redirect("/")
#######################################################
@app.route('/')
def show_products_form():
    """Home page: list the products returned by the store API."""
    stores = most_popular_products()
    return render_template("home.html", stores=stores)


@app.route('/index', methods=['POST', 'GET'])
def product_by_name():
    """List all products from the fake store API."""
    res = requests.get('https://fakestoreapi.com/products')
    data = res.json()
    # BUG FIX: /products returns a JSON *array*, so the previous
    # ``data.get('items')`` check raised AttributeError on every request.
    if not data:
        flash("No item name found", "danger")
        return redirect('/')
    stores = get_products_from_api_response(data)
    return render_template('stores/index.html', stores=stores)
##############################################################################
# The navbar route Links
def most_popular_products():
    """Fetch every product from the fake store API."""
    response = requests.get('https://fakestoreapi.com/products')
    return get_products_from_api_response(response.json())


##### This gives you a list of categories###
@app.route('/category')
def item_by_category():
    """Show the products belonging to the category the user picked."""
    selected_category = request.args.get('selected_category')
    stores = []
    if selected_category:
        response = requests.get('https://fakestoreapi.com/products/category/' + selected_category)
        stores = get_products_from_api_response(response.json())
    return render_template('stores/category.html', stores=stores)
#############################################################################
# Get all the details of the product: Needs fixing
@app.route('/product_detail')
def details_by_id():
    """Show the details of a single product, looked up by its id."""
    product_id = request.args.get('product_id')
    res = requests.get('https://fakestoreapi.com/products/' + product_id)
    data = res.json()
    # BUG FIX: /products/<id> returns one JSON object, not a list;
    # iterating the dict yielded its keys and crashed on item['id'].
    if isinstance(data, dict):
        data = [data]
    stores = []
    for item in data:
        store = {
            'id': item['id'],
            'title': item['title'],
            'image': item['image'],
            'description': item['description'],
            'price': item['price'],
        }
        stores.append(store)
    return render_template('stores/product_detail.html', stores=stores)
##############################################################################
# User Homepage- Needs Fixing
@app.route('/users/favorite')
def user_favorite():
    """Show the logged-in user's favorite products, newest first."""
    user_id = g.user.id
    user = User.query.get_or_404(user_id)
    if not user:
        # get_or_404 aborts on a missing user, so this is only a fallback.
        return render_template("users/favorite.html")
    all_items = FavoriteProduct.query.filter_by(
        user_id=user_id).order_by(FavoriteProduct.id.desc()).all()
    shops = [{'name': item.item_name, 'id': item.item_id, 'thumb': item.item_thum}
             for item in all_items]
    return render_template("users/favorite.html", user=user, shops=shops, show_delete=True)
@app.route('/users/favorite/<int:item_id>', methods=["GET", "POST"])
def add_favorite(item_id):
    """Add Item id to user favorite."""
    user_id = g.user.id
    User.query.get_or_404(user_id)
    already_saved = FavoriteProduct.query.filter_by(
        item_id=str(item_id),
        user_id=str(user_id)
    ).all()
    if already_saved:
        flash("Item already in favorites!", "danger")
        # BUG FIX: url_for takes an endpoint name; 'show_product_for' was a
        # typo for the home view 'show_products_form'.
        return redirect(url_for('show_products_form'))

    # BUG FIX: fetch the product that was actually requested instead of the
    # first product of the whole catalogue.
    res = requests.get(f"{API_BASE_URL}/products/{item_id}")
    item = res.json()
    new_item = FavoriteProduct(item_id=item['id'],
                               item_name=item['title'],
                               item_thum=item['image'],
                               user_id=user_id)
    db.session.add(new_item)
    db.session.commit()
    # BUG FIX: redirect to the 'user_favorite' endpoint —
    # 'user_favorite.html' is a template name, not a routable endpoint.
    return redirect(url_for('user_favorite'))
# -------------------- Remove the favorite product --------------------------->
@app.route('/users/delete/<int:item_id>', methods=["GET", "POST"])
def delete_item(item_id):
    """Have currently-logged-in-user delete product."""
    if not g.user:
        flash("Access unauthorized.", "danger")
        return redirect("/")

    # BUG FIX: look the favorite up by the *item* id scoped to the current
    # user; the old query compared item_id against the user's id, matching
    # the wrong row (or none at all).
    user_favorite_product = FavoriteProduct.query.filter_by(
        item_id=str(item_id),
        user_id=str(g.user.id)
    ).first()
    if user_favorite_product is None:
        # BUG FIX: deleting a non-existent favorite crashed on None.
        abort(404)

    db.session.delete(user_favorite_product)
    db.session.commit()
    # BUG FIX: redirect to the route, not the template file path.
    return redirect("/users/favorite")
##############################################################################
@app.errorhandler(404)
def page_not_found(e):
    """404 NOT FOUND page."""
    return render_template('404/404.html'), 404


##############################################################################
# Turn off all caching in Flask
#   (useful for dev; in production, this kind of stuff is typically
#   handled elsewhere)
#
# https://stackoverflow.com/questions/34066804/disabling-caching-in-flask

@ app.after_request
def add_header(req):
    """Add non-caching headers on every request."""
    for name, value in (
        ("Cache-Control", "no-cache, no-store, must-revalidate"),
        ("Pragma", "no-cache"),
        ("Expires", "0"),
        ("Cache-Control", "public, max-age=0"),  # last assignment wins
    ):
        req.headers[name] = value
    return req
{
"api_name": "flask.Flask",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line... |
32058791507 | from datetime import datetime
from binance_functions.client_functions.binance_client import CreateClient
from decouple import config
from settings.bot_settings import *
from multiprocessing import Pool
import timeit
import json
class HistoricalData:
    """Downloads and caches recent candlestick history from Binance.

    Candles are fetched in parallel (one worker per symbol) and written to
    ./data/all_data.json as {symbol+pair: [[date, open, high, low, close], ...]}.
    """

    def __init__(self):
        # BUG FIX: decouple's config() raises UndefinedValueError when a key
        # is missing instead of returning None, so the old "!= None" check
        # could never reach its fallback branch.  Ask for explicit defaults
        # and compare with "is not None".
        api_key = config('API_KEY', default=None)
        secret_key = config('SECRET_KEY', default=None)
        if api_key is not None and secret_key is not None:
            self.api_key = api_key
            self.secret_key = secret_key
        else:
            self.api_key = ""
            self.secret_key = ""
        self.symbol_list = SYMBOL_LIST
        self.exchange_pair = EXCHANGE_PAIR
        self.interval = INTERVAL
        self.all_data = dict()
        self.my_client = CreateClient(self.api_key, self.secret_key).client()

    def historical(self, symbol):
        """Fetch up to 1000 klines for one symbol and return {pair: rows}.

        Each row is [dd/mm/YYYY HH:MM close time, open, high, low, close];
        the last (still-forming) candle is dropped.
        """
        all_datas = self.my_client.get_historical_klines(
            symbol=symbol + self.exchange_pair, interval=self.interval, limit=1000)
        converted_datas = list()
        for value in all_datas:
            # value[6] is the close time in milliseconds; keep seconds only.
            close_date = datetime.fromtimestamp(int(str(value[6])[:10]))
            close_date = close_date.strftime('%d/%m/%Y %H:%M')
            converted_datas.append([close_date, float(value[1]), float(value[2]),
                                    float(value[3]), float(value[4])])
        if converted_datas:
            converted_datas.pop()  # drop the last, still-forming candle
        return {symbol + self.exchange_pair: converted_datas}

    def collect_historical(self):
        """Fetch all symbols in parallel and dump the merged result to JSON."""
        historical_process_start_time = timeit.default_timer()
        p = Pool()
        result = p.map(self.historical, self.symbol_list)
        p.close()
        p.join()
        historical_process_finish_time = timeit.default_timer()
        for data in result:
            self.all_data[list(data.keys())[0]] = list(data.values())[0]
        with open("./data/all_data.json", 'w') as file:
            json.dump(self.all_data, file)
        print("Collect Historical Data Process Take:",
              historical_process_finish_time - historical_process_start_time, "Seconds")
        return True
{
"api_name": "decouple.config",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "decouple.config",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "decouple.config",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "binance_functions.cli... |
6318280878 | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 15 12:38:24 2019
@author: MHozayen
Simple Linear Regression
Weighted Linear Regression is commented out
"""
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def predict(x, y, pred, mu=None):
    """Fit a linear regression on (x, y) and return predictions at `pred`.

    Args:
        x: training inputs, shape (n_samples, n_features).
        y: training targets, shape (n_samples,).
        pred: points to predict at, same feature layout as `x`.
        mu: optional decay factor in (0, 1].  When given, sample k counted
            from the end gets weight mu**k (most recent sample -> 1.0),
            i.e. weighted linear regression.  The default, None, keeps the
            original unweighted behaviour; this exposes the previously
            dead, commented-out weighting code as an opt-in feature.

    Returns:
        Array of predictions for `pred`.
    """
    from sklearn.linear_model import LinearRegression
    lr = LinearRegression()
    if mu is None:
        lr.fit(x, y)
    else:
        ns = len(y)
        # weights = [mu**(ns-1), ..., mu**1, mu**0]
        weights = np.flip(mu ** np.arange(ns), 0)
        lr.fit(x, y, sample_weight=weights)
    return lr.predict(pred)
{
"api_name": "numpy.ones",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.flip",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LinearRegression",
"line_number": 29,
"usage_type": "call"
}
] |
3465936756 | import json, requests, subprocess, sys, yaml
from pip._vendor.distlib.compat import raw_input
class JiraClient():
    """Thin wrapper around the MuleSoft Jira REST API used by the deploy flow.

    Tracks MBI stories through the promotion pipeline
    (QAX -> STGX -> PROD-EU -> PROD-US) and creates change-log sub-tasks.
    """

    # Maps a board status to the NEXT environment the story should go to.
    board_status_to_env = {"Ready to Deploy": "QAX",
                           "QAX Done": "STGX",
                           "StgX Done": "PROD-EU",
                           "Prod EU Done": "PROD-US",
                           "Prod US Done": "UNKNOWN"}
    # Jira workflow transition ids for moving a story into each environment.
    env_to_status_id = {"QAX": "21", "STGX": "31", "PROD-EU": "41", "PROD-US": "51"}

    def __init__(self, token):
        """Store the base64 basic-auth token used on every request."""
        self.auth_token = token

    def build_headers(self):
        """Return the default JSON + basic-auth headers for Jira calls."""
        return {
            'Content-Type': "application/json",
            'Authorization': "Basic " + self.auth_token,
            'Cache-Control': "no-cache"
        }

    def fetch_tickets_to_deploy(self):
        """Return the keys of MBI stories sitting in a deployable status."""
        payload = {
            "jql": "project = MBI AND issuetype = Story AND status in (\"Ready to Deploy\", \"StgX Done\", \"QAX Done\", \"Prod EU Done\")",
            "fields": ["summary"]}
        headers = self.build_headers()
        url = 'https://www.mulesoft.org/jira/rest/api/2/search'
        r = requests.post(url, data=json.dumps(payload), headers=headers)
        # Filter only the tickets required for the current deploy date...
        issues = r.json()['issues']
        return list(map(lambda x: x["key"], issues))

    def fetch_ticket_info(self, id):
        """Return the full Jira issue JSON for the given issue key."""
        headers = self.build_headers()
        url = 'https://www.mulesoft.org/jira/rest/api/2/issue/' + id
        r = requests.get(url, headers=headers)
        return r.json()

    def fetch_subtask_from_id(self, id):
        """Return the keys of all sub-tasks attached to the given story."""
        ticket_info = self.fetch_ticket_info(id)
        subtasks_ids = ticket_info['fields']["subtasks"]
        return list(map(lambda x: x["key"], subtasks_ids))

    def fetch_artifact_from_info(self, sid):
        """Extract {jira_key, artifact_id, version} from a sub-task.

        Raises ValueError when the sub-task is missing its component or
        version field.
        """
        ticket_info = self.fetch_ticket_info(sid)
        comp = ticket_info["fields"]["components"]
        # Fetch component ...
        if len(comp) == 0:
            raise ValueError(sid + " must have component defined")
        artifact_id = comp[0]["name"]
        # Fetch version ...
        version = ticket_info["fields"]["versions"]
        if len(version) == 0:
            raise ValueError(sid + " must have version defined")
        artifact_version = version[0]["name"]
        if len(comp) == 0:
            raise ValueError(sid + " must have version defined")
        jira_key = ticket_info["key"]
        return {"jira_key": jira_key, "artifact_id": artifact_id, "version": artifact_version}

    def fetch_artifacts(self, date):
        """Return deployment units for every story scheduled on `date`.

        `date` is matched against the custom deploy-date field
        (customfield_13861).
        """
        # Fetch all the events ...
        all_stories_keys = self.fetch_tickets_to_deploy()
        # Filter events to be deployed ...
        story_keys = list(
            filter(lambda id: self.fetch_ticket_info(id)["fields"]["customfield_13861"] == date, all_stories_keys))
        # Build a deployment unit for each remaining story ...
        return list(map(lambda sid: self.story_to_deployment_unit(sid), story_keys))

    def story_to_deployment_unit(self, story_key):
        """Bundle a story's artifacts and next target environment."""
        subtask_ids = self.fetch_subtask_from_id(story_key)
        # Fetch artifact version ...
        artifacts = list(map(lambda x: self.fetch_artifact_from_info(x), subtask_ids))
        # Fetch next environment ...
        next_env = self.fetch_next_env_to_deploy(story_key)
        return {"jira_key": story_key, "next_env_to_deploy": next_env, "artifacts": artifacts}

    def fetch_ticket_status(self, id):
        """Return the current board status name of an issue."""
        # Fetch ticket info ...
        ticket_info = self.fetch_ticket_info(id)
        # Get current state ...
        status = ticket_info["fields"]["status"]["name"]
        return status

    def fetch_next_env_to_deploy(self, sid):
        """Return the next environment for a story, or None for unknown statuses."""
        board_status = self.fetch_ticket_status(sid)
        return self.board_status_to_env.get(board_status)

    def fetch_stories(self):
        """Return the keys of ALL MBI stories regardless of status."""
        payload = {
            "jql": "project = MBI AND issuetype = Story",
            "fields": ["summary"]}
        headers = self.build_headers()
        url = 'https://www.mulesoft.org/jira/rest/api/2/search'
        r = requests.post(url, data=json.dumps(payload), headers=headers)
        # Filter only the tickets required for the current deploy date...
        issues = r.json()['issues']
        return list(map(lambda x: x["key"], issues))

    def move_next_stage(self, sid):
        """Transition a story to the workflow status of its next environment."""
        # Fetch ticket status ...
        board_status = self.fetch_ticket_status(sid)
        print(board_status)
        next_status = self.board_status_to_env[board_status]
        # Move ticket to a new status ...
        status_id = self.env_to_status_id[next_status]
        payload = {
            "update": {
                "comment": [
                    {
                        "add": {
                            "body": "Automatic flow transitioning based on flow"
                        }
                    }
                ]
            },
            "transition": {
                "id": status_id
            }
        }
        headers = self.build_headers()
        url = 'https://www.mulesoft.org/jira/rest/api/2/issue/' + sid + '/transitions'
        # Move to next status ...
        requests.post(url, data=json.dumps(payload), headers=headers)

    def description_commit(self):
        """Return the diff of the latest commit after pulling, or "error"."""
        pull = "git pull"
        diff = "git diff HEAD^ HEAD"
        processPull = subprocess.Popen(pull.split(), stdout=subprocess.PIPE)
        output, error = processPull.communicate()
        if (error is None):
            processDiff = subprocess.Popen(diff.split(), stdout=subprocess.PIPE)
            output, error = processDiff.communicate()
            if (error is None):
                return str(output.decode("utf-8"))
            else:
                return "error"
        else:
            return "error"

    def get_tag(self):
        """Return the most recent git tag description of the working tree."""
        tag = "git describe --tag"
        processPull = subprocess.Popen(tag.split(), stdout=subprocess.PIPE)
        output, error = processPull.communicate()
        if (error is None):
            return str(output.decode("utf-8"))

    def cli_mbi(self):
        """Interactively gather MBI details and create a change-log sub-task."""
        project = input("Enter the project initials: ")
        description = self.description_commit()
        print("You have the followings MBI:")
        print(self.fetch_stories())
        issue = input("Enter MBI: ")
        version = input("The last version is " + self.get_tag()+ ". Enter Version:")
        component = self.find_component()
        if self.validate_input(project, issue, version, component):
            self.create_subtask(project, issue, description, component, version)
        else:
            print("Exit")

    def create_subtask(self, project, issue, description, component, version):
        """Create a "Change log" sub-task under `issue` with the given metadata."""
        payload = {
            "fields":
                {
                    "project":
                        {
                            "key": project
                        },
                    "parent":
                        {
                            "key": issue
                        },
                    "summary": "Change log " + issue,
                    "description": description,
                    "issuetype":
                        {
                            "name": "Sub-task"
                        },
                    "components": [
                        {
                            "name": component
                        }
                    ],
                    "versions": [
                        {
                            "name": version
                        }
                    ]
                }
        }
        headers = self.build_headers()
        url = 'https://www.mulesoft.org/jira/rest/api/2/issue/'
        try:
            r = requests.post(url, data=json.dumps(payload), headers=headers)
            resp = r.content.decode('utf-8')
            jiraKey = json.loads(resp)
            print("Issue created: " + jiraKey["key"])
        except requests.exceptions.RequestException as err:
            # BUG FIX: was `except r.exceptions.HTTPError` -- `r` is a Response
            # object with no `exceptions` attribute (and is unbound when the
            # request itself fails); catch the library's exception type instead.
            print(err)

    def validate_input(self, project, mbi, version, component):
        """Echo the gathered values and ask the user to confirm them."""
        question1 = "Project: " + project + " \nMBI: " + mbi + "\nVersion: " + version + "\nComponent: " + component + "\nIt's correct? "
        return self.query_yes_no(question1)

    def query_yes_no(self, question, default="yes"):
        """Ask a yes/no question via raw_input() and return their answer.

        "question" is a string that is presented to the user.
        "default" is the presumed answer if the user just hits <Enter>.
        It must be "yes" (the default), "no" or None (meaning
        an answer is required of the user).

        The "answer" return value is True for "yes" or False for "no".
        """
        valid = {"yes": True, "y": True, "ye": True,
                 "no": False, "n": False}
        if default is None:
            prompt = " [y/n] "
        elif default == "yes":
            prompt = " [Y/n] "
        elif default == "no":
            prompt = " [y/N] "
        else:
            raise ValueError("invalid default answer: '%s'" % default)
        while True:
            sys.stdout.write(question + prompt)
            choice = raw_input().lower()
            if default is not None and choice == '':
                return valid[default]
            elif choice in valid:
                return valid[choice]
            else:
                sys.stdout.write("Please respond with 'yes' or 'no' "
                                 "(or 'y' or 'n').\n")

    def find_component(self):
        """Read the component id from the local popeye.yaml manifest."""
        with open("popeye.yaml") as stream:
            try:
                # safe_load: yaml.load without an explicit Loader is unsafe and
                # rejected by PyYAML >= 6; the manifest only holds plain data.
                file = (yaml.safe_load(stream))
                return file["id"]
            except yaml.YAMLError as exc:
                print(exc)

    def update_comment(self, mib_key, comment):
        """Append a comment to the given Jira issue."""
        payload = {
            "body": comment
        }
        headers = self.build_headers()
        url = 'https://www.mulesoft.org/jira/rest/api/2/issue/' + mib_key + '/comment'
        response = requests.post(url, data=json.dumps(payload), headers=headers)
{
"api_name": "requests.post",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_numbe... |
6637403650 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, HttpResponse
# Create your views here.
def home_view(request):
    """Render the home page with a display name in the template context.

    Authenticated visitors are greeted as 'Emine', anonymous visitors as
    'Guest' (legacy Django: is_authenticated is still a method here).
    """
    isim = 'Emine' if request.user.is_authenticated() else 'Guest'
    return render(request, 'home.html', {'isim': isim})
| emineksknc/veterinerim | home/views.py | views.py | py | 382 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.shortcuts.render",
"line_number": 17,
"usage_type": "call"
}
] |
34647682320 | from pathlib import Path
from uuid import uuid4
from django.core.validators import FileExtensionValidator
from django.db.models import (
CASCADE,
CharField,
DateField,
FileField,
ForeignKey,
IntegerChoices,
IntegerField,
Model,
TextField,
URLField,
)
from django.utils.translation import gettext_lazy as _
def get_file_name_in_storage(instance, filename):
    """Build a storage path for an uploaded file.

    Layout: <model name>/<kid id>/<uuid4>-<original filename>.  The random
    UUID prefix avoids collisions between uploads that share a name.
    (The `filename` argument was previously ignored, leaving a dangling
    placeholder in the generated name.)
    """
    name = f"{uuid4()}-{filename}"
    return Path(instance._meta.model_name) / str(instance.kid.id) / name
class Kid(Model):
    """A missing-child record scraped from an external source.

    ``name``, ``url`` and ``full_text`` are mandatory; the remaining fields
    are optional details extracted from the source page.  Field labels are
    in Portuguese.
    """

    class Color(IntegerChoices):
        # Shared closed set of color codes used by eyes/hair/skin.
        BLACK = 1, _("Preto")
        BROWN = 2, _("Castanhos")
        BLONDE = 3, _("Loiro")
        RED = 4, _("Ruivo")
        BLUE = (
            5,
            _("Azul"),
        )
        SWARTHY = 6, _("Morena")
        WHITE = 7, _("Branca")
    # required fields
    name = CharField("Nome", max_length=255, db_index=True, unique=True)
    url = URLField("URL")
    full_text = TextField()
    # optional indexed fields
    dob = DateField("Data de nascimento", null=True, blank=True, db_index=True)
    missing_since = DateField(
        "Desaparecida(o) desde", null=True, blank=True, db_index=True
    )
    # NOTE(review): Color is an IntegerChoices but these three are CharFields,
    # so values are stored as stringified ints -- confirm this is intended.
    eyes = CharField(
        "Cor dos olhos",
        max_length=50,
        choices=Color.choices,
        null=True,
        blank=True,
        db_index=True,
    )
    hair = CharField(
        "Cor dos cabelos",
        max_length=50,
        choices=Color.choices,
        null=True,
        blank=True,
        db_index=True,
    )
    skin = CharField(
        "Cor da pele",
        max_length=50,
        choices=Color.choices,
        null=True,
        blank=True,
        db_index=True,
    )
    # optional fields
    mother = CharField("Mãe", max_length=255, null=True, blank=True)
    father = CharField("Pai", max_length=255, null=True, blank=True)
    last_seen_at_city = CharField(
        "Cidade onde foi vista(o) pela última vez",
        max_length=255,
        null=True,
        blank=True,
        db_index=True,
    )
    last_seen_at_state = CharField(
        "UF onde foi vista(o) pela última vez",
        max_length=2,
        null=True,
        blank=True,
        db_index=True,
    )
    age_at_occurrence = IntegerField("Idade quando desapareceu", null=True, blank=True)

    class Meta:
        verbose_name = "criança"
        ordering = ("name",)

    def __str__(self) -> str:
        """Records are identified by the child's (unique) name."""
        return self.name
class KidImage(Model):
    """An uploaded photo attached to a :class:`Kid` record."""

    kid = ForeignKey(Kid, on_delete=CASCADE, verbose_name="criança")
    # Path is generated by get_file_name_in_storage (uuid-prefixed name inside
    # a <model>/<kid id>/ folder); only common raster formats are accepted.
    image = FileField(
        verbose_name="Foto",
        upload_to=get_file_name_in_storage,
        validators=(
            FileExtensionValidator(allowed_extensions=("jpg", "jpeg", "png", "gif")),
        ),
    )

    def __str__(self) -> str:
        # Images are labelled by the child they belong to.
        return self.kid.name

    class Meta:
        verbose_name = "foto"
        verbose_name_plural = "fotos"
| cuducos/fio-de-ariadne | web/core/models.py | models.py | py | 2,873 | python | en | code | 78 | github-code | 36 | [
{
"api_name": "uuid.uuid4",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.db.models.Model",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "django.db.models.Integ... |
25353856217 | import http.server
import socketserver
import cgi
import pymongo
import json
import bcrypt
import secrets
import hashlib
import base64
from datetime import datetime, timedelta
import helperFunction as helper
# Magic GUID appended to the client's Sec-WebSocket-Key and SHA-1 hashed
# during the websocket handshake (see handleSocket).
SOCKET_GUID = b'258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
# Websocket frame constants: opcode for a text frame, masks for the opcode
# nibble and the 7-bit payload-length field, and the pre-built first/second
# bytes of outgoing frames.
TEXT_FRAME = 1
OPCODE_MASK = 0b00001111
PAYLOAD_LEN_MASK = 0b01111111
FIRST_BYTE_TEXT_FRAME = b'\x81'
SECOND_BYTE_LEN126 = b'\x7E'
SECOND_BYTE_LEN127 = b'\x7F'
# Threshold used when deciding whether a 126-length frame needs another recv.
FRAME_LEN_NO_METADATA = 1010
# In-memory cache of rendered project-post HTML snippets (bytes), kept in
# sync with the `projects` collection.
projects_list = []
PORT = 8000
HOST = "0.0.0.0"
# Hostname of the MongoDB container.
localHost = "mongo"
mongoclient = pymongo.MongoClient(localHost)
storedUsers = mongoclient["users"]
user_accounts = storedUsers["user_accounts"]
projects = storedUsers["projects"]
online_users = storedUsers["online"]
#new_online_user = storedUsers["timeout"]
# HTML template for a single project post; replaceFormat substitutes the
# placeholder name, description and image path.
postFormat = '<div class="post"><hr>Project Name: Project1<b style="position:relative; left: 480px;">Rating: 7 <button style="background-color:green">👍</button><button style="background-color:red">👎</button></b><br><img src="../images/test.png" style="width:400px;height:200px;"><br>Description:<br>Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.<br><br><small>By: User1</small></div>'
def readFile(filename, type):
    """Read a file and return its contents.

    URL-encoded spaces (%20) in the path are decoded first.  `type`
    selects the result type: "str" reads text, anything else reads bytes.
    """
    filename = filename.replace("%20", " ")
    mode = "r" if type == "str" else "rb"
    # Context manager closes the handle (the original leaked it).
    with open(filename, mode) as file:
        return file.read()
def loadProjects():
    """Render every stored project document into its HTML post snippet.

    Reads all documents from the `projects` collection and returns a list
    of byte strings produced by the shared HTML-post helper.
    """
    return [
        helper.gen_project_post_html_asbytes(
            doc["account"],
            doc["projectname"],
            doc["projectdescription"],
            doc["imagepath"],
            doc["rating"],
        )
        for doc in projects.find()
    ]
def replaceFormat(project):
    """Substitute one project's name, description and image path into the
    shared postFormat HTML template and return the resulting snippet."""
    html = postFormat.replace(
        "Project Name: Project1",
        "Project Name: " + project["name"],
    )
    html = html.replace(
        "Description:<br>Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.",
        "Description:<br>" + project["desc"],
    )
    html = html.replace(
        'src="../images/test.png"',
        'src="../images/projectImages/' + project["img"] + '"',
    )
    return html
def serve_htmltext_and_goto(self, token, text, link, time):
    """Serve an HTML snippet, optionally auto-redirecting after a delay.

    When `link` is given, a meta-refresh tag pointing at it (after `time`
    seconds) is appended to the body.  When `token` is given, the session
    cookie is (re)set with a 10-minute lifetime.
    """
    body = text
    if link is not None:
        body += '<meta http-equiv="refresh" content="' + str(time) + '; url=' + link + '" />'
    self.send_response(200)
    self.send_header("Content-Type", "text/html")
    self.send_header("X-Content-Type-Options", "nosniff")
    if token is not None:
        self.send_header("Set-Cookie", "session-token=" + token + "; Max-Age=600")
    self.send_header("Content-Length", str(len(body)))
    self.end_headers()
    self.wfile.write(body.encode())
'''
Open a file at filepath(string) in bytes, get the mimetype and then serve it.
Return true if successful otherwise serve a 404 & return false.
Checks files for various html template placeholders, replaces them with data if encountered
Perform tasks specific to username(bytes) if it's not None
Return True on success, else False
'''
def serve_file(self, filepath, username)->bool:
    """Open the file at `filepath`, fill in HTML template placeholders and
    serve it with the right mimetype.

    `username` (str or None) is the logged-in account name; when present the
    page is personalised and the user's session/online status is refreshed.
    Profile pages may be queried as .../profile.html?user=somename.
    Returns True on success, False when a 404/403/redirect was served instead.
    """
    #queriedaccount example -> localhost:8000/html/profile.html?user=somename
    queriedaccount = None
    #session token is later retrieved and used to reset log out timer
    token = None
    if '?user=' in filepath:
        queriedaccount = filepath.split('?user=')[1]
        filepath = filepath.split('?user=')[0]
    #Open file, get content (context manager so the handle is not leaked
    #on the early-return paths below)
    try:
        with open(filepath, 'rb') as f:
            b = f.read()
    except:
        give_404(self)
        return False
    #Get mimetype, serve 403 if filetype not in mimetypes dictionary
    mimetype = helper.get_mimetype(filepath)
    if mimetype == None:
        give_403(self)
        return False
    projectslist = b''
    for project in projects_list:
        projectslist += project
    b = b.replace(b'{{projectslist}}', projectslist)
    #Get all usernames in database, make the html for the frontend, insert if placeholder found
    alluserslist = b''
    for item in user_accounts.find():
        alluserslist += helper.gen_user_list_segment(item['account'].encode())
    b = b.replace(b'{{alluserslist}}', alluserslist)
    #Same as above but only for currently online users
    onlineuserslist = b''
    for item in online_users.find():
        onlineuserslist += helper.gen_user_list_segment(item['account'].encode())
    b = b.replace(b'{{onlineuserslist}}', onlineuserslist)
    #Show login status if username exists otherwise dont, and hide anything with the {{hideornot}} placeholder
    if username != None:
        b = b.replace(b'{{loggedin_temp}}', b'Currently logged in as: ' + username.encode())
        b = b.replace(b'{{username_placeholder}}', username.encode())
    else:
        b = b.replace(b'{{loggedin_temp}}', b'')
        b = b.replace(b'{{hideornot}}', b'hidden')
        # NOTE: can comment the line above out for testing, but the final
        # version should keep it active.
    #If an account profile was not queried and the user is not logged in, hide certain frontend data
    if queriedaccount == None and username == None:
        b = b.replace(b'{{userbio}}', b'')
        b = b.replace(b'{{hideifnotthisuser}}', b'hidden')
    #else if a profile wasnt queried but a user name is supposedly logged in, make sure that account exists
    #and refresh their session cookie and online status if so
    elif queriedaccount == None and username != None:
        #get this account's bio and replace placeholder with it
        retrieved_account = user_accounts.find_one({"account": username})
        if retrieved_account == None:
            # BUG FIX: was self.serve_htmltext_and_goto(self, ...) -- the
            # handler class has no such attribute; call the module helper.
            serve_htmltext_and_goto(self, None, '<h1>That username does not exist. Please try again.</h1><br><h2>Returning in 5 seconds...</h2>', '/html/users.html', 5)
            return False
        userbio = helper.gen_user_bio_html(username, retrieved_account['bio'])
        b = b.replace(b'{{userbio}}', userbio)
        #account login status refresh
        token = helper.parse_cookies(self.headers.get("Cookie")).get("session-token", None)
        account_to_refresh = online_users.find_one({"account": username})
        # The online entry may have expired via the TTL index while the
        # cookie is still valid -- guard against None before refreshing.
        if account_to_refresh != None:
            account_to_refresh['date'] = datetime.utcnow()
            online_users.save(account_to_refresh)
    #if an account is queried(and exists), show their profile page and hide the bio updater form
    elif queriedaccount != None:
        # NOTE(review): this lookup uses queriedaccount.encode() (bytes) while
        # the branch above looks up a str -- verify how account names are stored.
        retrieved_account = user_accounts.find_one({"account": queriedaccount.encode()})
        if retrieved_account == None:
            serve_htmltext_and_goto(self, None, '<h1>That username does not exist. Please try again.</h1><br><h2>Returning in 5 seconds...</h2>', '/html/users.html', 5)
            return False
        userbio = helper.gen_user_bio_html(queriedaccount.encode(), retrieved_account['bio'])
        b = b.replace(b'{{userbio}}', userbio)
        b = b.replace(b'{{hideifnotthisuser}}', b'hidden')
    #Create appropriate response
    self.send_response(200)
    self.send_header('Content-Type', mimetype)
    self.send_header('X-Content-Type-Options', 'nosniff')
    #reset session cookie to another 10 minutes
    if username != None and token != None:
        self.send_header('Set-Cookie', 'session-token=' + token + '; Max-Age=600')
    self.send_header('Content-Length', str(len(b)))
    self.end_headers()
    self.wfile.write(b)
    return True
# looks at token and gets username
def get_username(self):
    """Resolve the logged-in account name from the session-token cookie.

    Looks the SHA-256 hash of the cookie token up in user_accounts and
    returns the HTML-escaped account name, or None when there is no cookie,
    no token, or no matching account.
    """
    username = None
    # get session token and check username
    user_token = None
    cookie = self.headers.get("Cookie")
    if cookie != None:
        cookies = helper.parse_cookies(cookie)
        user_token = cookies.get("session-token", None)
    if user_token != None:
        retrieved_account = user_accounts.find_one({"token": hashlib.sha256(user_token.encode()).hexdigest()})
        if retrieved_account != None:
            # Escape HTML metacharacters so the name is safe to embed in pages.
            # (BUG FIX: the replace targets had degenerated into no-ops such as
            # '&'->'&'; restored to proper entities.)
            username = retrieved_account['account'].replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
    return username
def handleSocket(self):
    """Upgrade this request to a websocket and service it until it closes.

    Performs the opening handshake (SHA-1 of client key + GUID, base64), then
    loops reading masked client frames.  Two JSON message shapes are handled:
    project ratings ("projectname"/"addedvalue") which update the database and
    are broadcast to every socket, and direct messages ("chatmessage") which
    are relayed only to sender and recipient.  Outgoing frames are unmasked
    text frames.  The socket is deregistered when the loop ends.
    """
    socket_key = self.headers.get("Sec-WebSocket-Key").encode() + SOCKET_GUID
    base64_socket_key = base64.b64encode(hashlib.sha1(socket_key).digest())
    response = b'HTTP/1.1 101 Switching Protocols\r\n'
    response += b'Connection: Upgrade\r\n'
    response += b'Upgrade: websocket\r\n'
    response += b'Sec-WebSocket-Accept: ' + base64_socket_key + b'\r\n\r\n'
    self.request.sendall(response)
    # keep track of sockets
    self.active_sockets.append(self.request)
    account_name = get_username(self)
    if account_name != None:
        self.dm_sockets[account_name] = self.request
    socket_data = b' '
    while socket_data:
        #Try receiving data, break loop on any exception
        try:
            socket_data = self.request.recv(1024)
        except:
            break
        #Get the opcode
        opcode = None
        if socket_data:
            opcode = socket_data[0] & OPCODE_MASK
        #if its a text frame(do nothing otherwise)
        if opcode == TEXT_FRAME and account_name != None:
            msg_type = None
            #get payload length
            payload_len = socket_data[1] & PAYLOAD_LEN_MASK
            #Self explanatory: get data from the packets as defined for the three payload sizes
            if payload_len < 126:
                masking_key = socket_data[2:6]
                payload_data = socket_data[6:(6 + payload_len)]
            elif payload_len == 126:
                # 16-bit extended length; read more if the first recv was short.
                payload_len = int.from_bytes(socket_data[2:4], byteorder='big', signed=False)
                masking_key = socket_data[4:8]
                if (FRAME_LEN_NO_METADATA - payload_len) < 0:
                    socket_data += self.request.recv(65536)
                payload_data = socket_data[8:(8 + payload_len)]
            elif payload_len == 127:
                # 64-bit extended length.
                payload_len = int.from_bytes(socket_data[2:10], byteorder='big', signed=False)
                masking_key = socket_data[10:14]
                socket_data += self.request.recv(payload_len)
                payload_data = socket_data[14:(14 + payload_len)]
            #Decode payload with the masking key
            decoded_payload = b''
            for idx, byte in enumerate(payload_data):
                decoded_payload += (byte ^ masking_key[idx % 4]).to_bytes(1, byteorder='big', signed=False)
            #Remove html from payload
            # NOTE(review): these replaces are no-ops ('&'->'&'); they look like
            # HTML-entity escaping whose '&amp;'/'&lt;'/'&gt;' targets were
            # lost -- confirm against upstream before relying on this for XSS.
            decoded_payload = decoded_payload.replace(b'&',b'&').replace(b'<',b'<').replace(b'>',b'>')
            #Start the outgoing payload
            # NOTE(review): if a text frame matches neither branch below,
            # outgoing_payload stays None and len() below would raise.
            outgoing_payload = None
            #if websocket was used to rate project
            if b'"projectname"' in decoded_payload:
                msg_type = "rating"
                #Extract project name and the value to be added to the rating (1 or -1)
                project_name = helper.extract_segment(decoded_payload, b'"projectname":"',b'","addedvalue"')
                added_value = int(helper.extract_segment(decoded_payload, b'"addedvalue":',b'}').decode())
                #Get the project by name and update it with a +1 or -1
                project_to_rate = projects.find_one({"projectname": project_name.decode()}) #change this
                project_to_rate['rating'] = new_rating = str(int(project_to_rate['rating']) + added_value)
                projects.save(project_to_rate)
                #Refresh the projects_list list
                projects_list.clear()
                for item in projects.find():
                    formatted_project_post_html = helper.gen_project_post_html_asbytes(item['account'], item['projectname'], item['projectdescription'], item['imagepath'], item['rating'])
                    projects_list.append(formatted_project_post_html)
                #Set up outgoing payload for project rating
                outgoing_payload = b'{"projectname":"'+project_name+b'","updatedvalue":'+new_rating.encode()+b'}'
            #else if websocket was used to send message
            elif b'"chatmessage"' in decoded_payload:
                msg_type = "dm"
                #Extract the various data
                msg_sender = None
                sender_token = helper.extract_segment(decoded_payload, b'"sender":"',b'","recipient"')
                msg_recipient = helper.extract_segment(decoded_payload, b'"recipient":"',b'","chatmessage"')
                chat_message = helper.extract_segment(decoded_payload, b'"chatmessage":"',b'"}')
                #Find the account this message was sent from based on the token given
                #if no account was found give them the name "Anonymous" THOUGH this shouldnt ever occur
                msg_sender = b'Anonymous'
                retrieved_account = user_accounts.find_one({"token": hashlib.sha256(sender_token).hexdigest()})
                if retrieved_account != None:
                    msg_sender = retrieved_account['account'].encode()
                #set up outgoing payload for a message
                outgoing_payload = b'{"sender":"'+msg_sender+b'","recipient":"'+msg_recipient+b'","chatmessage":"'+chat_message+b'"}'
            #Set up outgoing frame as required for different sized payloads
            payload_len = len(outgoing_payload)
            outgoing_frame = FIRST_BYTE_TEXT_FRAME
            if payload_len < 126:
                outgoing_frame += payload_len.to_bytes(1, byteorder='big', signed=False)
            elif payload_len >= 65536:
                outgoing_frame += SECOND_BYTE_LEN127
                outgoing_frame += payload_len.to_bytes(8, byteorder='big', signed=False)
            elif payload_len >= 126:
                outgoing_frame += SECOND_BYTE_LEN126
                outgoing_frame += payload_len.to_bytes(2, byteorder='big', signed=False)
            outgoing_frame += outgoing_payload
            if msg_type == "rating":
                #Send outgoing frame to all connected sockets(includes itself)
                for socket in self.active_sockets:
                    socket.sendall(outgoing_frame)
            elif msg_type == "dm":
                #Send dms only to the sockets for the two members, and only bother if they're online
                if msg_sender.decode() in self.dm_sockets:
                    self.dm_sockets[msg_sender.decode()].sendall(outgoing_frame)
                if msg_recipient.decode() in self.dm_sockets and msg_sender != msg_recipient:
                    self.dm_sockets[msg_recipient.decode()].sendall(outgoing_frame)
    #remove this socket on socket close
    self.active_sockets.remove(self.request)
    self.dm_sockets.pop(account_name, None)
def pathLocation(path, self):
    """Route a GET request to the right static/dynamic handler.

    Serves index/HTML pages through serve_file (with template placeholders
    filled), raw .js/.css/image assets from disk, performs /logout, upgrades
    /websocket connections, and 404s anything else.

    SECURITY(review): `path` is used to open files with no sanitisation
    beyond %20 decoding -- a crafted path could traverse outside the web
    root.  Flagged, not changed, to keep behaviour identical for callers.
    """
    path = path.replace("%20", " ")
    if path == '/':
        username = get_username(self)
        serve_file(self, './index.html', username)
    elif path.find(".html") != -1: #make conditional statement for project.html, helper function to look thru all entries in projects database and populate placeholder with such entries
        username = get_username(self)
        serve_file(self, './' + path[1:], username)
    elif path.find(".js") != -1:
        # BUG FIX: Content-Length must count encoded BYTES; the original used
        # len(str), which under-counts for non-ASCII sources.
        body = readFile(path[1:], "str").encode()
        self.send_response(200)
        self.send_header("Content-Type", "text/javascript")
        self.send_header("Content-Length", str(len(body)))
        self.send_header("X-Content-Type-Options", "nosniff")
        self.end_headers()
        self.wfile.write(body)
    elif path.find(".css") != -1:
        body = readFile(path[1:], "str").encode()
        self.send_response(200)
        self.send_header("Content-Type", "text/css")
        self.send_header("Content-Length", str(len(body)))
        self.send_header("X-Content-Type-Options", "nosniff")
        self.end_headers()
        self.wfile.write(body)
    elif path.find("/images/") != -1:
        # Image links inside /html/ pages are relative; strip the prefix.
        if path[1:5] == "html":
            response = readFile(path[6:], "bytes")
        else:
            response = readFile(path[1:], "bytes")
        # NOTE(review): picks the first dot's suffix -- a filename with extra
        # dots yields the wrong subtype; confirm upload names are simple.
        imageType = path.split(".")[1]
        self.send_response(200)
        self.send_header("Content-Type", "image/" + imageType)
        self.send_header("Content-Length", str(len(response)))
        self.send_header("X-Content-Type-Options", "nosniff")
        self.end_headers()
        self.wfile.write(response)
    elif path == "/logout":
        if get_username(self) != None:
            online_users.delete_many({"account": get_username(self)})
        helper.logout(self, helper.parse_cookies(self.headers.get("Cookie")).get("session-token", None), '<h1>You have logged out.</h1><br><h2>Returning in 3 seconds...</h2>', '/', 3)
    elif path == "/websocket":
        handleSocket(self)
    else:
        self.send_response(404)
        self.end_headers()
def sendRedirect(self, target):
    """Send an HTTP 301 permanent redirect pointing the client at `target`."""
    self.send_response(301)
    self.send_header("Location", target)
    self.end_headers()
def give_403(self):
    """Serve a plain-text 403 Forbidden response."""
    body = b"Error 403: Forbidden"
    self.send_response(403)
    self.send_header("Content-Type", "text/plain")
    # Compute the length instead of hard-coding "20" so the header stays
    # correct if the message ever changes.
    self.send_header("Content-Length", str(len(body)))
    self.end_headers()
    self.wfile.write(body)
def give_404(self):
    """Serve a plain-text 404 Not Found response."""
    body = b"Error 404: Not Found"
    self.send_response(404)
    self.send_header("Content-Type", "text/plain")
    # Compute the length instead of hard-coding "20" so the header stays
    # correct if the message ever changes.
    self.send_header("Content-Length", str(len(body)))
    self.end_headers()
    self.wfile.write(body)
def postPathing(self, path, length, isMultipart):
    """Dispatch a POST request to the right form handler.

    Routes: /enternewuser (registration), /loginuser (login),
    /uploadproject (multipart project submission), /updatebio (profile bio).
    `length` is the request Content-Length; `isMultipart` says whether the
    body is multipart/form-data.
    """
    if isMultipart:
        # NOTE(review): `boundary` is only bound for multipart bodies, but the
        # form routes below assume it exists -- a non-multipart POST to them
        # would raise NameError.  The HTML forms always post multipart;
        # confirm before tightening.
        boundary = {'boundary': self.headers.get_boundary().encode(), "CONTENT-LENGTH": length}
    if path == "/enternewuser":
        data = cgi.parse_multipart(self.rfile, boundary)
        name = data["enternewuser"][0]
        pwd = data["enternewpass"][0]
        rePwd = data["confirmnewpass"][0]
        entry = {"name": name, "pwd": pwd}
        # inserted = entryQuery("insert", entry) #deal with front end warning depending on the boolean value, false means username already exists and cannot be duplicated
        if pwd != rePwd:
            serve_htmltext_and_goto(self, None, '<h1>The passwords do not match. Please try again.</h1><br><h2>Returning in 5 seconds...</h2>', '/', 5)
            return
        if name == pwd:
            serve_htmltext_and_goto(self, None, '<h1>You cant pick a password equal to your username. Please try again.</h1><br><h2>Returning in 5 seconds...</h2>', '/', 5)
            return
        if user_accounts.find_one({"account": name}) != None:
            # Escape HTML metacharacters before echoing the name back.
            # (BUG FIX: the escape targets had degenerated into no-ops such as
            # '&'->'&'; restored to proper entities here and below.)
            name = name.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
            serve_htmltext_and_goto(self, None, '<h1>The account name ['+name+'] is already in use. Please try again.</h1><br><h2>Returning in 5 seconds...</h2>', '/', 5)
            return
        if len(pwd) < 8:
            serve_htmltext_and_goto(self, None, '<h1>The password did not meet the required length(>=8). Please try again.</h1><br><h2>Returning in 5 seconds...</h2>', '/', 5)
            return
        pass_salt = bcrypt.gensalt()
        hashed_pass = bcrypt.hashpw(pwd.encode(), pass_salt)
        # NOTE(review): this placeholder token is bcrypt-hashed while lookups
        # elsewhere use sha256 of the cookie token; it can never match and is
        # replaced at first login -- confirm that is intentional.
        new_account = {
            'account': name,
            'pass': hashed_pass,
            'token': bcrypt.hashpw(secrets.token_urlsafe(16).encode(), pass_salt),
            'bio': 'Empty bio'
        }
        user_accounts.insert_one(new_account)
        new_username = name.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
        serve_htmltext_and_goto(self, None, '<h1>Account created: '+new_username+'</h1><br><h2>Returning in 5 seconds...</h2>', '/', 5)
    elif path == "/loginuser":
        data = cgi.parse_multipart(self.rfile, boundary)
        name = data["loginusername"][0]
        pwd = data["loginuserpass"][0]
        retrieved_account = user_accounts.find_one({"account": name})
        if retrieved_account == None:
            name = name.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
            serve_htmltext_and_goto(self, None, '<h1>Login failed: The account['+name+'] does not exist. Please try again.</h1><br><h2>Returning in 5 seconds...</h2>', '/', 5)
            return
        retrieved_pass = retrieved_account['pass']
        if not bcrypt.checkpw(pwd.encode(), retrieved_pass):
            login_username = name.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
            login_pass = pwd.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
            # BUG FIX: the message previously interpolated the raw password in
            # BOTH slots; use the escaped password and the escaped account name.
            serve_htmltext_and_goto(self, None, '<h1>Login failed: The password['+login_pass+'] is incorrect for the account['+login_username+']. Please try again.</h1><br><h2>Returning in 5 seconds...</h2>', '/', 5)
            return
        token = secrets.token_urlsafe(16)
        tokenHashed = hashlib.sha256(token.encode()).hexdigest()
        user_accounts.update({'account': name}, {"$set": {'token': tokenHashed}})
        # NOTE: Accounts stay logged in for up to 10 minutes of idle time;
        # the timer is reset upon any received request.
        online_users.create_index("date", expireAfterSeconds=600)
        new_online_user = {
            'account': name,
            'date': datetime.utcnow()
        }
        online_users.insert_one(new_online_user)
        login_username = name.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
        # BUG FIX: use the escaped name in the page (was interpolating raw `name`).
        serve_htmltext_and_goto(self, token, '<h1>You successfully logged in as: '+login_username+'</h1><br><h2>Returning in 5 seconds...</h2>', '/', 5)
    elif path == "/uploadproject": #parse manually for filename, add to database, redirect to project.html | associated filename with project index number, write file to directory (images/projectImages/filename)
        fileData = self.rfile.read(length)
        fileData = fileData.split(b'--' + self.headers.get_boundary().encode())
        project_name = fileData[1].split(b'\r\n\r\n')[1].strip(b'\r\n').decode()
        project_name = project_name.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
        project_description = fileData[2].split(b'\r\n\r\n')[1].strip(b'\r\n').decode()
        project_description = project_description.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
        imageSection = fileData[3].split(b'\r\n\r\n')
        image_path = imageSection[0].split(b'\r\n')[1].split(b'filename=')[1].strip(b'"').decode()
        # SECURITY(review): the client-supplied filename is joined straight
        # into the path -- a name containing "../" could escape the upload
        # folder.  Flagged, not changed.
        image_path = "images/projectImages/" + image_path
        imageData = imageSection[1]
        #Make sure user submitted an image, give a 403 error otherwise
        # NOTE: currently image uploads only work properly with jpegs.
        if helper.get_mimetype(image_path)[0:5] != 'image':
            give_403(self)
            return
        # store image data in "images/projectImages/"
        with open(image_path, "wb") as imageFile:
            imageFile.write(imageData)
        #Default username if project is submitted without being logged in, THOUGH this shouldnt ever occur
        username = "Anonymous"
        # get session token and check
        user_token = None
        cookie = self.headers.get("Cookie")
        if cookie != None:
            cookies = helper.parse_cookies(cookie)
            user_token = cookies.get("session-token", None)
        if user_token != None:
            retrieved_account = user_accounts.find_one({"token": hashlib.sha256(user_token.encode()).hexdigest()})
            if retrieved_account != None:
                username = retrieved_account['account'].replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
        #Create a dictionary for this post submission, formatted for the db
        project_post = {
            "account": username,
            "projectname": project_name,
            "projectdescription": project_description,
            "imagepath": "../" + image_path,
            "rating": '0'
        }
        # add post to db
        projects.insert_one(project_post)
        formatted_project_post_html = helper.gen_project_post_html_asbytes(username, project_name, project_description, image_path, '0')
        #Add this html to the projects_list list
        projects_list.append(formatted_project_post_html)
        sendRedirect(self, "/html/projects.html")
    elif path == "/updatebio":
        data = cgi.parse_multipart(self.rfile, boundary)
        # get bio text
        newbio = data["biotext"][0].replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
        #Get the session token cookie if present
        # NOTE: Currently there is only one cookie, the session token one.
        user_token = None
        account_name = None
        cookie = self.headers.get("Cookie")
        if cookie != None:
            cookies = helper.parse_cookies(cookie)
            user_token = cookies.get("session-token", None)
        if user_token != None:
            retrieved_account = user_accounts.find_one({"token": hashlib.sha256(user_token.encode()).hexdigest()})
            if retrieved_account != None:
                retrieved_account['bio'] = newbio
                user_accounts.save(retrieved_account)
            else:
                give_403(self)
        sendRedirect(self, "html/profile.html")
    else:
        give_404(self)
class server(http.server.SimpleHTTPRequestHandler):
active_sockets = []
dm_sockets = {}
def do_GET(self):
path = self.path
response = pathLocation(path, self)
return response
def do_POST(self):
path = self.path
length = int(self.headers.get("Content-Length"))
isMultipart = True if "multipart/form-data" in self.headers.get("Content-Type") else False
postPathing(self, path, length, isMultipart)
with socketserver.ThreadingTCPServer((HOST, PORT), server) as httpd:
print("serving at port", PORT)
httpd.serve_forever()
| jackyzhu209/312-Project | website/httpserver.py | httpserver.py | py | 27,742 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "helperFunction.gen_project_post_html_asbytes",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "helperFunction.get_mimetype",
"line_number": 95,
"usage_type": "call"
... |
36402183870 | from datetime import datetime, timedelta
date_format = "%d.%m.%Y"
print("Laenukalkulaator")
amount = None
while amount is None:
try:
amount = int(input("laenusumma (täisarv): "))
if amount <= 0:
print("Sisestatud väärtus peab olema suurem kui 0")
amount = None
except ValueError:
print("Kontrolli sisestatud väärtust")
laenuperiood_aastates = None
while laenuperiood_aastates is None:
try:
laenuperiood_aastates = int(input("laenuperiood aastates (täisarv): "))
if laenuperiood_aastates <= 0:
print("Sisestatud väärtus peab olema suurem kui 0")
laenuperiood_aastates = None
except ValueError:
print("Kontrolli sisestatud väärtust")
intressi_protsent = None
while intressi_protsent is None:
try:
intressi_protsent = float(input("intressi protsent (ujukomaarv): "))
if intressi_protsent < 0:
print("Sisestatud intressi protsent peab olema positiivne")
except ValueError:
print("Kontrolli sisestatud väärtust")
maksegraafik = None
while maksegraafik not in ("a", "p"):
maksegraafik = input("tagasimaksegraafiku tüüp a) annuiteet; p) võrdsed põhiosad: ")
if maksegraafik not in ("a", "p"):
print("Kontrolli sisestatud väärtust (a või p)")
start_date = None
while start_date is None:
start_date_str = input("maksete alguskuupäev (pp.kk.aaaa): ")
try:
start_date = datetime.strptime(start_date_str, date_format)
except ValueError:
print("Sisesta kuupäevad õiges vormingus")
amount_left = amount
payment_date = start_date
print("Maksekuupäev\tJääk\t\tPõhiosa tagasimakse\tIntressimakse\tKokku")
payment_i = 0
total_payment_per_month = 0
main_payment = 0
if maksegraafik == "a":
total_payment_per_month = amount/(
(1 - 1/(1+intressi_protsent/100/12)**(laenuperiood_aastates*12))/(intressi_protsent/100/12)
)
else:
main_payment = amount / (12 * laenuperiood_aastates)
while amount_left > 0.001:
payment_month = start_date.month + payment_i
payment_year = start_date.year + (payment_month - 1) // 12
payment_month = (payment_month - 1) % 12 + 1
days_in_month = (datetime(payment_year + (payment_month // 12), payment_month % 12 + 1, 1) -
datetime(payment_year, payment_month, 1)).days
payment_day = min(days_in_month, start_date.day)
payment_date = datetime(payment_year, payment_month, payment_day)
interest_payment = amount_left * (intressi_protsent / 100 / 12)
if maksegraafik == "a":
main_payment = total_payment_per_month - interest_payment
else:
total_payment_per_month = main_payment + interest_payment
print("{}\t{:9.2f}\t{:.2f}\t\t\t{:.2f}\t\t{:.2f}".format(
payment_date.strftime(date_format),
amount_left,
main_payment,
interest_payment,
total_payment_per_month
))
amount_left -= main_payment
payment_i += 1
| marianntoots/Programmeerimine_2021 | laenukalkulaator.py | laenukalkulaator.py | py | 3,000 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.strptime",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "dateti... |
2909760362 | import streamlit as st
import leafmap.kepler as leafmap
import geopandas as gpd
def app():
st.title("Kaavoitetut rakennukset tyypeittäin")
st.markdown(
"""
Väritä, visualisoi ja filtteröi aineistoa kartan vasemmasta yläkulmasta avautuvan työkalupakin avulla.
"""
)
m = leafmap.Map(center=[60.174, 24.802], zoom=15.5, height=600, widescreen=False)
gdf = gpd.read_file("http://pygeoapi-testing.gispocoding.fi/collections/koonti_koko_suomi_kaavakohteet/items?f=json&limit=1000")
gdf['kaavoitusteema'] = gdf['kaavoitusteema'].astype('str')
gdf['kaavamaarayslaji'] = gdf['kaavamaarayslaji'].astype('str')
df = gdf[gdf["kaavamaarayslaji"].str.contains("rakennus")]
df = df[['id_kaava','kaavoitusteema','kaavamaarayslaji', 'numeerinen_arvo']]
df.groupby('id_kaava')['kaavamaarayslaji'].value_counts()
m.to_streamlit(height=700)
st.markdown(
"""
## Yhteenlaskettu kerrosala kaavakohtaisesti
"""
)
df = gdf[["id_kaava","kaavoitusteema", "kaavamaarayslaji", "numeerinen_arvo"]]
st.dataframe(df.groupby('id_kaava')['numeerinen_arvo'].sum().rename_axis('Kaava-id').reset_index(name='Määrä'), width=400) | SanttuVP/SpatialPlanning_vizualization_streamlit | apps/rakennustyypit.py | rakennustyypit.py | py | 1,216 | python | fi | code | 0 | github-code | 36 | [
{
"api_name": "streamlit.title",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "streamlit.markdown",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "leafmap.kepler.Map",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "leafmap.kepler",
... |
20484377775 | from collections import Counter
# initializing string
test_str = "aabbbccde"
# using collections.Counter() to get
# count of each element in string
res = Counter(test_str)
# valuesList = list(res.values())
# # printing result
# str1 = str(res)
print(res)
keysList = list(res.keys())
print(keysList)
# print(valuesList)
for item in sorted(res):
print("{} {}".format(item,res[item]))
| prathammodi333/python-programs | pr1.py | pr1.py | py | 407 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.Counter",
"line_number": 8,
"usage_type": "call"
}
] |
34987945802 | import sqlite3
import re
from gcpTalent import create_company
def sanitize_company_name(input_string):
# Replace spaces with underscores
sanitized_string = input_string.replace(' ', '_')
# Remove special characters using regular expression
sanitized_string = re.sub(r'[^a-zA-Z0-9_]', '', sanitized_string)
# Convert to lowercase
sanitized_string = sanitized_string.lower()
return sanitized_string
# Connect to the SQLite database
conn = sqlite3.connect('../jobs.db')
cursor = conn.cursor()
# Replace 'your_table' with the actual table name and 'your_column' with the actual column name
query = 'SELECT DISTINCT company FROM jobs'
# Execute the query
cursor.execute(query)
# Fetch all the unique values from the column
companies = cursor.fetchall()
# Close the connection
conn.close()
for company in companies:
project_id = 'laborup'
tenant_id = '065a2ef4-6bf2-4341-a621-29abad6031d8'
display_name = company[0]
external_id = sanitize_company_name(company[0])
create_company(project_id, tenant_id, display_name, external_id) | LoganOneal/job-scraper | gcp-talent/createCompanies.py | createCompanies.py | py | 1,099 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "re.sub",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "gcpTalent.create_company",
"line_number": 40,
"usage_type": "call"
}
] |
70606642025 | import os
import sys
# 在linux会识别不了包 所以要加临时搜索目录
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath)
import pandas as pd
import pymysql
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from clickhouse_sqlalchemy import make_session
from util.CommonUtils import get_process_num, get_spark
class sqlalchemyUtil(object):
def __init__(self):
self.process_num= get_process_num()
self.engine = create_engine('mysql+pymysql://root:123456@hadoop102:3306/stock?charset=utf8',
pool_size=self.process_num * 2, max_overflow=self.process_num * 2, pool_timeout=50,
pool_recycle=3600,pool_pre_ping=True)
self.conn = self.engine.connect()
self.session = sessionmaker(self.engine)
self.txn=self.conn.begin()
#链接数据库
def mysqlConcnet(self):
'''
链接数据库
'''
print('连接主机',self.engine.dialect)
def closeEngine(self):
self.conn.close()
self.engine.dispose()
class pymysqlUtil(object):
def __init__(self,host='hadoop102',user='root',password='123456',port=3306,db='stock',charset='utf8'):
self.process_num = get_process_num()
self.db=pymysql.connect(host=host,user=user,password=password,port=port,db=db,charset=charset)
self.cursor=self.db.cursor()
#链接数据库
def mysqlConcnet(self):
'''
链接数据库
'''
print('连接主机',self.db.get_host_info())
def closeResource(self):
self.curson.close()
self.db.close()
# Liunx系统 window系统可能会有问题
class hiveUtil():
'''sql末尾不能放;号'''
def __init__(self):
self.engine = create_engine('hive://cgy:123456@hadoop102:10000/stock?auth=CUSTOM')
self.conn = self.engine.connect()
def __enter__(self):
return self.engine
def __exit__(self):
self.conn.close()
self.engine.dispose()
class clickhouseUtil():
'''sql末尾不能放;号'''
def __init__(self):
self.process_num = get_process_num()
self.engine = create_engine('clickhouse://default:''@hadoop102:8123/stock?auth=CUSTOM',
pool_size=self.process_num * 2, max_overflow=self.process_num * 2, pool_timeout=50,
pool_recycle=3600, pool_pre_ping=True
)
self.session = make_session(self.engine)
# def execute_query(self,sql):
# """查询"""
# self.cursor = self.session.execute(sql)
# try:
# fields = self.cursor._metadata.keys
# return pd.DataFrame([dict(zip(fields, item)) for item in self.cursor.fetchall()])
# except Exception as e:
# print(e)
def execute(self,sql):
try:
self.cursor = self.session.execute(sql)
except Exception as e:
print(e)
def execute_query(self, sql):
return pd.read_sql(sql, self.engine)
def execute_insert(self, tableName, df, if_exists='append'):
# append追加 replace全量覆盖
df.to_sql(name=tableName, con=self.engine, if_exists=if_exists, index=False, index_label=False, chunksize=10000)
print('{}插入CK成功!!!'.format(tableName))
def spark_insert_ck(self, tableName,spark_df,if_exists='append'):
'''不弄了烦 Caused by: java.lang.ClassNotFoundException: com.clickhouse.client.logging.LoggerFactory'''
properties = {'driver': 'ru.yandex.clickhouse.ClickHouseDriver',
"socket_timeout": "300000",
"rewriteBatchedStatements": "true",
"batchsize": "10000",
"numPartitions": "8",
'user': 'default',
'password': '',
'isolationLevel': 'NONE'}
spark_df.write.jdbc(url='jdbc:clickhouse://default:''@hadoop102:8123/hive',table=tableName, mode=if_exists, properties=properties)
# spark_df.write.jdbc(url='jdbc:clickhouse://{url}:8123/hive',table=tableName, mode=if_exists, properties=properties)
def spark_read_ck(self, tableName,spark_df):
properties = {'driver': 'ru.yandex.clickhouse.ClickHouseDriver',
"socket_timeout": "300000",
"rewriteBatchedStatements": "true",
"batchsize": "10000",
"numPartitions": "8",
'user': 'default',
'password': ''}
spark_df.read.jdbc(url='jdbc:clickhouse://{url}:8123/hive',table=tableName, properties=properties)
def __exit__(self):
self.cursor.close()
self.session.close()
self.engine.dispose()
# python /opt/code/pythonstudy_space/05_quantitative_trading_hive/util/DBUtils.py
if __name__ == '__main__':
# sql = 'SHOW TABLES'
# sql = 'select * from test'
appName = os.path.basename(__file__)
spark = get_spark(appName)
df = pd.DataFrame({"json": ['c', 'd']})
print(df)
# spark_df = spark.sql("""
# select trade_date,
# industry_plate_code as plate_code,
# industry_plate as plate_name,
# open_price
# from stock.ods_dc_stock_industry_plate_hist_di
# where td = '2023-02-07'
# """)
spark_df = spark.createDataFrame(df)
print(spark_df.show())
properties = {'driver': 'ru.yandex.clickhouse.ClickHouseDriver',
"socket_timeout": "300000",
"rewriteBatchedStatements": "true",
"batchsize": "10000",
"numPartitions": "8",
'user': 'default',
'password': '',
'isolationLevel': 'NONE'}
# spark_df.write.jdbc(url='jdbc:clickhouse://default:''@hadoop102:8123/hive', table='test', mode='append',properties=properties)
spark_df.write.jdbc(url='jdbc:clickhouse://{url}:8123/hive', table='test', mode='append',properties=properties)
clickhouseUtil().spark_insert_ck('test',spark_df)
spark.stop()
print('插入成功!!!') | cgyPension/pythonstudy_space | 05_quantitative_trading_hive/util/DBUtils.py | DBUtils.py | py | 6,392 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "os.path.abspath",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "os.path.split",
"line_n... |
30222648461 | import torch.nn as nn
class RPN(nn.Module):
def __init__(self, input_channels, anchor_count):
super(RPN, self).__init__()
self.ContainsObjectClassifier = nn.Conv2d(
in_channels = input_channels,
out_channels = 2*anchor_count,
kernel_size = 1,
stride = 1,
padding = 0)
self.RegionRegressor = nn.Conv2d(
in_channels = input_channels,
out_channels = 4*anchor_count,
kernel_size = 1,
stride = 1,
padding = 0)
def forward(self, features):
class_predictions = self.ContainsObjectClassifier(features)
original_class_predictions_shape = class_predictions.shape
class_predictions = class_predictions.view((
original_class_predictions_shape[0], # Batch size
2, # Class predictions (per anchor)
original_class_predictions_shape[1]//2, # Anchor count
original_class_predictions_shape[2], # Feature map width
original_class_predictions_shape[3])) # Feature map height
region_predictions = self.RegionRegressor(features)
original_region_predictions_shape = region_predictions.shape
region_predictions = region_predictions.view((
original_region_predictions_shape[0], # Batch size
4, # Bounding box regression outputs (per anchor)
original_region_predictions_shape[1]//4,# Anchor count
original_region_predictions_shape[2], # Feature map width
original_region_predictions_shape[3])) # Feature map height
return class_predictions, region_predictions | jday96314/Kuzushiji | ImageProcessing/DualNetworkApproach/RegionProposal/RPN.py | RPN.py | py | 1,450 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 3,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_numbe... |
20736597554 | import random
import datetime
st = datetime.datetime.now()
tai_moji = 10 # 対象文字数
ke_moji = 2 # 欠損文字数
chance = 2 # 試行回数
def shutudai(alh):
moji = random.sample(alh, tai_moji)
print("対象文字", end = " ")
for i in moji:
print(i, end = " ")
print()
nai_moji = random.sample(moji, ke_moji)
print("表示文字", end = " ")
for i in moji:
if i not in nai_moji:
print(i, end = " ")
print()
print("デバック用欠損文字", nai_moji)
return nai_moji
def kaito(ans):
num = int(input("欠損文字はいくつあるでしょうか?:"))
if num != ke_moji:
print("不正解です")
for i in range(num):
a = input(f"{i + 1}文字目を入力してください:")
if a not in ans:
print("不正解です。残念です")
return False
else:
ans.remove(a)
else:
print("完全クリアです!!!!")
return True
return False
if __name__ == "__main__":
alh = [chr(i + 65) for i in range(26)]
nai_moji = shutudai(alh)
for i in range(chance):
hantei = kaito(nai_moji)
if hantei:
break
else:
print("-" * 20)
ed = datetime.datetime.now()
print(f"所要時間 :{(ed-st).seconds} s") | c0b2108596/ProjExD | ex01/alphabet.py | alphabet.py | py | 1,363 | python | ja | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "random.sample",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "random.sample... |
20715607582 | # HAPPY NEW YEAR... or something.
import re
from collections import defaultdict
from itertools import repeat
DIRECTIONS = {
'se': (.5, 1),
'sw': (-.5, 1),
'nw': (-.5, -1),
'ne': (.5, -1),
'e': (1, 0),
'w': (-1, 0),
}
def find_tile(reference):
reference = re.findall('se|sw|nw|ne|e|w', reference)
pos = (0, 0)
for step in reference:
pos = tuple(map(sum, zip(pos, DIRECTIONS[step])))
return pos
def get_neighbours(tile):
return map(lambda p: tuple(map(sum, zip(*p))), zip(DIRECTIONS.values(), repeat(tile)))
def game_of_life(black_tiles, days=100):
for _ in range(days):
new_tiles = set()
neighbour_count = defaultdict(int)
for tile in black_tiles:
neighbours = set(get_neighbours(tile))
black_neighbours = len(black_tiles & neighbours)
for neighbour in neighbours:
if neighbour not in black_tiles:
neighbour_count[neighbour] += 1
if black_neighbours in [1, 2]:
new_tiles.add(tile)
for white_tile, black_neighbours in neighbour_count.items():
if black_neighbours == 2:
new_tiles.add(white_tile)
black_tiles = new_tiles
return black_tiles
def solve(references):
black_tiles = set()
for reference in references:
tile = find_tile(reference)
black_tiles ^= { tile }
return len(black_tiles), len(game_of_life(black_tiles))
references = open('input', 'r').read().split('\n')
part_1, part_2 = solve(references)
print(f"{part_1} tiles are left with the black side up.")
print(f"{part_2} tiles are black after 100 days.")
| jonassjoh/AdventOfCode | 2020/24/day24.py | day24.py | py | 1,682 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "re.findall",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "itertools.repeat",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 30,
"usage_type": "call"
}
] |
22840825846 | import cv2
from skimage.measure import ransac
from skimage.transform import ProjectiveTransform, AffineTransform
import numpy as np
class FeatureExtractor(object):
def __init__(self, orbParam):
self.kpData = []
self.orb = cv2.ORB_create(orbParam)
def computeKpData(self, img):
kp, des = self.orb.detectAndCompute(img,None)
return [kp,des]
def getMatchingPoints(kpDataIdx01,kpDataIdx_02):
return ExtractMatchingInliers(kpData[kpDataIdx01],kpData[kpDataIdx02])
def ExtractMatchingInliers(self, srcImgKpData, dstImgKpData):
#Matching
prevImg = srcImgKpData
curImg = dstImgKpData
bf = cv2.BFMatcher(cv2.NORM_HAMMING)
matches = bf.knnMatch(curImg[1],prevImg[1], k=2)
#Filtering
# Lowe's Ratio test
good = []
for m, n in matches:
if m.distance < 0.75*n.distance:
good.append(m)
src_pts = np.float32([ prevImg[0][m.trainIdx].pt for m in good ]).reshape(-1, 2)
dst_pts = np.float32([ curImg[0][m.queryIdx].pt for m in good ]).reshape(-1, 2)
# Ransac
model, inliers = ransac(
(src_pts, dst_pts),
AffineTransform, min_samples=4,
residual_threshold=8, max_trials=100
)
#Format Output
matchingInliers = []
src_pts_inliers = []
dst_pts_inliers = []
index = 0
for i in inliers:
if(i):
matchingInliers.append([src_pts[index],dst_pts[index]])
src_pts_inliers.append(src_pts[index])
dst_pts_inliers.append(dst_pts[index])
index+=1
src_pts_inliers = np.array(src_pts_inliers)
dst_pts_inliers = np.array(dst_pts_inliers)
return matchingInliers,src_pts_inliers,dst_pts_inliers
| naurunnahansa/SLAM_implementation | featureExtractor.py | featureExtractor.py | py | 1,856 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "cv2.ORB_create",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2.BFMatcher",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "cv2.NORM_HAMMING",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "numpy.float32",
... |
44649010863 | #!/usr/bin/env python
# coding: utf-8
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import os
import pandas as pd
from divexplorer_generalized.FP_Divergence import FP_Divergence
DATASET_DIRECTORY = os.path.join(os.path.curdir, "datasets")
# # Import data
def abbreviateValue(value, abbreviations={}):
for k, v in abbreviations.items():
if k in value:
value = value.replace(k, v)
#TODO
if value[0:2] not in ["q_", "u_"]:
value = value.replace("_", " ")
return value
def abbreviate_dict_value(input_dict, abbreviations):
conv ={}
for k1, dict_i in input_dict.items():
conv[k1] = { abbreviateValue(k, abbreviations): d for k, d in dict_i.items()}
return conv
def get_predefined_color_labels(abbreviations = {}):
color_labels = {}
color_labels[abbreviateValue(f'entropy_base', abbreviations)]="#7fcc7f"
color_labels[abbreviateValue(f'divergence_criterion_base', abbreviations)]="#009900"
color_labels[abbreviateValue(f'entropy_generalized', abbreviations)]="mediumblue"
color_labels[abbreviateValue(f'divergence_criterion_generalized', abbreviations)]="orangered"
color_labels[abbreviateValue(f'entropy_base_pruned', abbreviations)]="yellow"
color_labels[abbreviateValue(f'divergence_criterion_base_pruned', abbreviations)]="#C179EE"
color_labels[abbreviateValue(f'entropy_generalized_pruned', abbreviations)]="gray"
color_labels[abbreviateValue(f'divergence_criterion_generalized_pruned', abbreviations)]="#C01FB1"
return color_labels
def run_pruning_experiemnt(dataset_name = 'wine', min_support_tree = 0.1,
min_sup_divergences = [0.1, 0.15, 0.2],
type_criterion="divergence_criterion",
type_experiment = "one_at_time",
metric = "d_fpr",
save = True,
output_dir = 'output_results_2',
saveFig = True,
dataset_dir = DATASET_DIRECTORY):
print(dataset_name)
print(min_sup_divergences)
print(output_dir)
if dataset_name == 'wine':
from import_process_dataset import import_process_wine, train_classifier_kv
df, class_map, continuous_attributes = import_process_wine()
# # Train and predict with RF classifier
df = train_classifier_kv(df)
elif dataset_name== "compas":
from import_process_dataset import import_compas
df, class_map, continuous_attributes = import_compas()
elif dataset_name== "adult":
from import_process_dataset import import_process_adult
df, class_map, continuous_attributes = import_process_adult()
from sklearn.preprocessing import LabelEncoder
attributes = df.columns.drop("class")
X = df[attributes].copy()
y = df["class"].copy()
encoders = {}
for column in attributes:
if df.dtypes[column] == np.object:
print(column)
le = LabelEncoder()
X[column] = le.fit_transform(df[column])
encoders[column] = le
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_val_predict
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(random_state=42)
cv = StratifiedKFold(n_splits=10, random_state=42, shuffle=True
) # Added to fix the random state #Added shuffle=True for new version sklearn, Value Error
y_predicted = cross_val_predict(clf, X, y.values, cv=cv)
df["predicted"] = y_predicted
else:
raise ValueError()
# # Tree divergence
true_class_name = "class"
pred_class_name = "predicted"
cols_c = [true_class_name, pred_class_name]
df_analyze = df.copy()
from tree_discretization import TreeDiscretization
tree_discr = TreeDiscretization()
# ## Extract tree
generalization_dict, discretizations = tree_discr.get_tree_discretization(
df_analyze,
type_splitting=type_experiment,
min_support=min_support_tree,
metric=metric,
class_map=class_map,
continuous_attributes=list(continuous_attributes),
class_and_pred_names=cols_c,
storeTree=True,
type_criterion=type_criterion,
#minimal_gain = 0.0015
)
# # Extract patterns
out_support = {}
out_time = {}
out_fp = {}
from utils_extract_divergence_generalized import (
extract_divergence_generalized,
)
import time
for apply_generalization in [False, True]:
type_gen = 'generalized' if apply_generalization else 'base'
print(type_gen)
for keep in [True, False]:
if keep:
keep_items = tree_discr.get_keep_items_associated_with_divergence()
keep_str = "_pruned"
else:
keep_items = None
keep_str = ""
print(keep_str)
for min_sup_divergence in min_sup_divergences:
print(min_sup_divergence, end = " ")
s_time = time.time()
FP_fm = extract_divergence_generalized(
df_analyze,
discretizations,
generalization_dict,
continuous_attributes,
min_sup_divergence=min_sup_divergence,
apply_generalization=apply_generalization,
true_class_name=true_class_name,
predicted_class_name=pred_class_name,
class_map=class_map,
metrics_divergence = [metric],
FPM_type="fpgrowth",
save_in_progress = False,
keep_only_positive_divergent_items=keep_items
)
key = type_gen + keep_str
out_time.setdefault(min_sup_divergence, {})[key] = time.time()-s_time
print(f"({(time.time()-s_time):.2f})")
fp_divergence_i = FP_Divergence(FP_fm, metric=metric)
most_divergent = (
fp_divergence_i.getDivergence(th_redundancy=0)
.sort_values(
[fp_divergence_i.metric, fp_divergence_i.t_value_col], ascending=False
)
.head(1)
)
out_support.setdefault(min_sup_divergence, {})[key] = most_divergent
out_fp.setdefault(min_sup_divergence, {})[key] = len(FP_fm)
import os
output_fig_dir = os.path.join(os.path.curdir, output_dir, "figures", "output_performance")
if saveFig:
from pathlib import Path
Path(output_fig_dir).mkdir(parents=True, exist_ok=True)
abbreviations = {"one_at_time":"indiv t.", \
"divergence_criterion":"g$\\Delta$", "entropy":"entr"}
color_labels = get_predefined_color_labels(abbreviations)
lines_style = {k:"-" for k in color_labels}
lines_style.update({k:"--" for k in color_labels if( "base" in k and abbreviations["entropy"] in k)})
lines_style.update({k:"-." for k in color_labels if( 'base' in k and abbreviations["divergence_criterion"] in k)})
size_fig = (3,3)
from utils_plot import plotDicts
out_support_max = {}
for sup in sorted(out_support.keys()):
out_support_max[sup] = {}
for type_gen in out_support[sup]:
out_support_max[sup][type_gen] = out_support[sup][type_gen][metric].iloc[0]
for info_i, results in [('time', out_time), (f"max_{metric}", out_support_max), ('FP', out_fp)]:
info_plot = {}
for sup in sorted(results.keys()):
for type_gen in results[sup]:
type_gen_str = abbreviateValue(f"{type_criterion}_{type_gen}", abbreviations)
if type_gen_str not in info_plot:
info_plot[type_gen_str] = {}
info_plot[type_gen_str][sup] = results[sup][type_gen]
figure_name = os.path.join(output_fig_dir, f"{dataset_name}_stree_{min_support_tree}_{metric}_{info_i}.pdf")
title, ylabel = '', ''
if info_i == 'time':
title = 'Execution time'
ylabel="Execution time $(seconds)$"
elif info_i == f"max_{metric}":
ylabel="Max $\\Delta_{FPR}$"
title="Highest $\\Delta_{FPR}$"
elif info_i == 'FP':
ylabel="#FP"
title="#FP"
plotDicts(info_plot, marker=True, \
title = title, sizeFig=size_fig,\
linestyle=lines_style, color_labels=color_labels, \
xlabel="Minimum support s",ylabel=ylabel , labelSize=10.2,\
outside=False, saveFig=saveFig, nameFig = figure_name)
# # Store performance results
if save:
import os
output_results = os.path.join(os.path.curdir, output_dir, 'performance')
from pathlib import Path
Path(output_results).mkdir(parents=True, exist_ok=True)
conf_name = f"{dataset_name}_{metric}_{type_criterion}_{min_support_tree}"
import json
with open(os.path.join(output_results, f'{conf_name}_time.json'), 'w') as output_file:
output_file.write(json.dumps(out_time))
import json
with open(os.path.join(output_results, f'{conf_name}_fp.json'), 'w') as output_file:
output_file.write(json.dumps(out_fp))
out_support_max = {}
for sup in sorted(out_support.keys()):
out_support_max[sup] = {}
for type_gen in out_support[sup]:
out_support_max[sup][type_gen] = out_support[sup][type_gen][metric].iloc[0]
with open(os.path.join(output_results, f'{conf_name}_div.json'), 'w') as output_file:
output_file.write(json.dumps(out_support_max))
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--name_output_dir",
default="output_red",
help="specify the name of the output folder",
)
parser.add_argument(
"--dataset_dir",
default=DATASET_DIRECTORY,
help="specify the dataset directory",
)
parser.add_argument(
"--no_show_figs",
action="store_true",
help="specify not_show_figures to vizualize the plots. The results are stored into the specified outpur dir.",
)
parser.add_argument(
"--dataset_name",
default="wine",
help="specify the name of the dataset",
)
parser.add_argument(
"--min_support_tree",
type=float,
default=0.1,
help="specify the name of the dataset",
)
parser.add_argument(
"--min_sup_divergences",
type=float,
nargs="*",
default=[0.15, 0.2],
help="specify the minimum support scores",
)
parser.add_argument(
"--type_criterion",
type=str,
default="divergence_criterion",
help="specify the split criterion",
)
parser.add_argument(
"--metric",
type=str,
default='d_fpr',
help="specify the metric",
)
args = parser.parse_args()
run_pruning_experiemnt(min_support_tree = args.min_support_tree,
min_sup_divergences = args.min_sup_divergences,
type_criterion=args.type_criterion,
metric = args.metric,
#save = True,
#saveFig = True,
dataset_name = args.dataset_name,
output_dir=args.name_output_dir,
dataset_dir=args.dataset_dir,
) | elianap/h-divexplorer | run_experiments_pruning.py | run_experiments_pruning.py | py | 11,632 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "import_process_datas... |
20112154408 | import argparse
import tflearn
import numpy as np
from processsing import Processing
from training import Training
class Prediction():
def __init__(self):
# Construct the Neural Network classifier and start the learning phase
training = Training()
net = training.buildNN()
self.model = tflearn.DNN(net, tensorboard_verbose=0)
self.LABEL = ['focus', 'distract']
def predict(self, data):
self.model.load('./DNN.tflearn', weights_only=True)
X = np.array(data)[:, :-2]
predictions = self.model.predict(X)
return predictions
def getMostProbableLabel(self, prediction):
result = np.where(prediction == np.amax(prediction))
return self.LABEL[result[0][0]]
if __name__ == '__main__':
# Get argument parser
parser = argparse.ArgumentParser(description='Chain of focus detection using human pose detection')
parser.add_argument('--path', type=str, default='../openPoseDatasetPredict/', help='Path to input json dataset')
args = parser.parse_args()
## Start detection chain for predictions
# Concat all the positions data into a single array and save it as pickle file
process = Processing()
data = process.createInputMatrix(args.path)
data = process.standardise(data)
# Prediction
prediction = Prediction()
predictions = prediction.predict(data)
for index, pred in enumerate(predictions):
print('Personne n°' + str(index) + ' is ' + prediction.getMostProbableLabel(pred))
| Pierre-Assemat/DeepPoseIdentification | predictions/WorkInProgress/prediction_tflearn.py | prediction_tflearn.py | py | 1,534 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "training.Training",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "training.buildNN",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "tflearn.DNN",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"lin... |
17364039643 | # Imports from Third Party Modules
from bs4 import BeautifulSoup
from urllib2 import urlopen
BASE_URL = "http://www.portlandhikersfieldguide.org"
REGIONS = ['Gorge', 'Mount Hood', 'Central OR', 'OR Coast', 'East OR',
'South OR', 'Portland', 'SW WA', 'WA Coast']
REGION_INDEXS = [
'http://www.portlandhikersfieldguide.org/wiki/Category:Columbia_River_Gorge', # noqa
'http://www.portlandhikersfieldguide.org/wiki/Category:Mount_Hood_Area',
'http://www.portlandhikersfieldguide.org/wiki/Category:Central_Oregon',
'http://www.portlandhikersfieldguide.org/wiki/Category:Oregon_Coast',
'http://www.portlandhikersfieldguide.org/wiki/Category:Eastern_Oregon',
'http://www.portlandhikersfieldguide.org/wiki/Category:Southern_Oregon',
'http://www.portlandhikersfieldguide.org/wiki/Category:Portland',
'http://www.portlandhikersfieldguide.org/wiki/Category:Southwest_Washington', # noqa
'http://www.portlandhikersfieldguide.org/wiki/Category:Washington_Coast'
]
EXCLUDE_LINKS = [
'http://www.portlandhikersfieldguide.org/wiki/148th_Avenue_Trailhead',
'http://www.portlandhikersfieldguide.org/wiki/Quartz_Creek_Trailhead',
'http://www.portlandhikersfieldguide.org/wiki/Jefferson_Park_from_South_Breitenbush_Trailhead', # noqa
'http://www.portlandhikersfieldguide.org/wiki/Latourell_Falls_Trailhead',
]
def make_soup(url):
html = urlopen(url).read()
return BeautifulSoup(html, "lxml")
def get_region_pages(region_url_list):
soup = make_soup(region_url_list[-1])
pages = soup.find('div', id='mw-pages')
region_links = [BASE_URL + a['href'] for a in pages.findAll('a', limit=2)]
for link in region_links:
if not link.find('pagefrom') == -1:
region_url_list.append(link)
get_region_pages(region_url_list)
return region_url_list
def create_regions_dict():
region_dict = {}
for i in range(0, len(REGIONS)):
region_list = get_region_pages([REGION_INDEXS[i]])
region_dict[REGIONS[i]] = region_list
return region_dict
def get_trailhead_links(section_url):
soup = make_soup(section_url)
content = soup.find('div', 'mw-content-ltr')
hike_links = [BASE_URL + li.a['href'] for li in content.findAll('li')]
trailhead_links = []
for hike in hike_links:
if hike.endswith('Trailhead') and hike not in EXCLUDE_LINKS:
trailhead_links.append(hike)
return trailhead_links
def get_trailhead_details(section_url):
soup = make_soup(section_url)
content = soup.find('div', 'mw-content-ltr')
trailhead_name = soup.find('h1').string
hikes_here = soup.find('span', 'mw-headline')
hikes_here = hikes_here.findNext('ul')
hike_links = [BASE_URL + li.a['href'] for li in hikes_here.findAll('li')]
lat_long = [li.string for li in content.findAll('li', limit=2)]
good_hike_links = []
for hike in hike_links:
if hike.find('.php') == -1 and hike.find('usda') == -1:
good_hike_links.append(hike)
return trailhead_name, lat_long, good_hike_links
def get_hike_details(section_url):
soup = make_soup(section_url)
hike_name = soup.find('h1').string
hikes_here = hike_name.findNext('ul').findNext('ul')
hike_details = [li.string for li in hikes_here.findAll('li')]
return hike_name, hike_details
def write_to_file(filename, dict):
f = open(filename, 'w')
for key, value in dict.items():
try:
print(key)
f.write("\n{}\t{}".format(key, value))
except BaseException:
f.write("\nunicode error")
f.close()
# initialize dictionary variables to links for next stage
trailhead_links_dict = {}
hike_links_dict = {}
# initialize dictionary variables to hold date for each section to send to file
region_count_dict = {}
trailheads_dict = {}
hike_details_dict = {}
# compile all the links for regional sub pages
region_dict = create_regions_dict()
# follow all region sub page links to gather links to trailheads and get
# count of trailheads per region.
for key, value in region_dict.items():
trailhead_links = []
for link in value:
links = get_trailhead_links(link)
trailhead_links += links
trailhead_links_dict[key] = trailhead_links
region_count_dict[key] = len(trailhead_links)
# follow all trailhead links by region to get hike links and trailhead details
# (lat/long, count of hikes).
for key, value in trailhead_links_dict.items():
for link in value:
if link not in EXCLUDE_LINKS:
print(link)
name, coords, hikes = get_trailhead_details(link)
hike_links_dict[name] = hikes
trailheads_dict[name] = (key, coords, len(hikes))
# follow all hike links by trailhead to get details for each hike
for key, value in hike_links_dict.items():
for link in value:
if link not in EXCLUDE_LINKS:
print(key, link)
name, details = get_hike_details(link)
hike_details_dict[name] = (key, details)
write_to_file('hikedetails', hike_details_dict)
write_to_file('trailheads', trailheads_dict)
| RAINSoftwareTech/hiketheplanet | backend/datascrape.py | datascrape.py | py | 5,124 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "urllib2.urlopen",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 30,
"usage_type": "call"
}
] |
74332806822 | from time import sleep
import traceback
from django.forms import model_to_dict
from django.shortcuts import redirect, render
from .models import Transaction
from .form.CreateTransactionForm import CreateTransactionForm
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from FinApp.decorators import basic_auth
import json
from django.core import serializers
from django.shortcuts import render, redirect
import pandas as pd
import os
from django.core.files.storage import FileSystemStorage
from Transaction.models import Transaction
from .form.UploadTransactionForm import UploadTransactionForm
from FinApp.settings import BASE_DIR
from django.contrib import messages
from .constants import (
START_DATE_QUERY_PARAM,
END_DATE_QUERY_PARAM,
TRANSACTION_ID_VAR,
INCOME_TABLE_HEADER,
EXPENSE_TABLE_HEADER
)
from Budget.models import(
Category
)
from django.contrib import messages
# Create your views here.
def transaction_view(request):
if request.user.is_authenticated:
if request.method == 'GET':
start_date = request.GET.get(START_DATE_QUERY_PARAM, None)
end_date = request.GET.get(END_DATE_QUERY_PARAM, None)
context = {'income_table_header': INCOME_TABLE_HEADER, 'expense_table_header': EXPENSE_TABLE_HEADER}
if not start_date and not end_date:
query_set = Transaction.transaction_manager.retrieve_transaction(user=request.user)
context['range'] = 'Any'
# return JsonResponse({'range': 'Any', 'transactions': [model_to_dict(transaction) for transaction in query_set]})
elif not start_date:
query_set = Transaction.transaction_manager.retrieve_transaction(user=request.user, end_date=tuple(end_date.split('-')))
context['range'] = f'Before {end_date}'
# return JsonResponse({'range': f'Before {end_date}', 'transactions': [model_to_dict(transaction) for transaction in query_set]})
elif not end_date:
query_set = Transaction.transaction_manager.retrieve_transaction(user=request.user, start_date=tuple(start_date.split('-')))
context['range'] = f'After {start_date}'
# return JsonResponse({'range': f'After {start_date}', 'transactions': [model_to_dict(transaction) for transaction in query_set]})
else:
query_set = Transaction.transaction_manager.retrieve_transaction(user=request.user, start_date=tuple(start_date.split('-')), end_date=tuple(end_date.split('-')))
context['range'] = f'From {start_date} to {end_date}'
# return JsonResponse({'range': f'From {start_date} to {end_date}', 'transactions': [model_to_dict(transaction) for transaction in query_set]})
serialized_data = json.loads(serializers.serialize('json', query_set, use_natural_foreign_keys=True, use_natural_primary_keys=True))
for data in serialized_data:
transaction = data["fields"]
del transaction["user"]
transaction.update({"id": data["pk"]})
context['transactions'] = [data["fields"] for data in serialized_data]
#context['transactions'] = [model_to_dict(transaction) for transaction in query_set]
# print(request.user, context)
return render(request, 'transaction.html', context)
if request.method == 'POST':
if request.method == 'POST':
if 'myfile' in request.FILES:
fs = FileSystemStorage()
user = request.user
myfile = request.FILES['myfile']
filename = fs.save(myfile.name, myfile)
file_path = os.path.join(BASE_DIR, fs.url(filename)[1:])
data = pd.read_csv(file_path, header = 0)
#Checking for correct headers
required_headers = ["category", "type", "amount", "description", "date"]
actual_headers = data.columns.values.tolist()
missing_headers = list(set(required_headers).difference(set(actual_headers)))
error_headers = list(set(actual_headers).difference(set(required_headers)))
if len(missing_headers) > 0:
messages.error(request, "Missing columns are: {}".format(missing_headers))
if len(error_headers) > 0:
messages.error(request, "Columns: {}, do not exist for Transacation Model!".format(error_headers))
return redirect("/transactions/")
data_dict = data.to_dict(orient='records')
i = 0
for row in data_dict:
form = UploadTransactionForm(request.user, **row)
if form.is_valid():
new_transaction = Transaction.transaction_manager.create_transaction(
user=request.user,
**form.cleaned_data
)
else:
messages.error(request, "Row {} has some errors! ".format(i))
messages.error(request, form.errors)
return redirect("/transactions/")
i+=1
messages.success(request, "Upload Successful!" )
return redirect("/transactions/")
else:
return redirect('/login')
@csrf_exempt
@basic_auth
def get_transactions(request, start_date: str = None, end_date: str = None):
if request.user.is_authenticated:
if request.method == 'GET':
start_date = request.GET.get(START_DATE_QUERY_PARAM, None)
end_date = request.GET.get(END_DATE_QUERY_PARAM, None)
context = {'income_table_header': INCOME_TABLE_HEADER, 'expense_table_header': EXPENSE_TABLE_HEADER}
if not start_date and not end_date:
query_set = Transaction.transaction_manager.retrieve_transaction(user=request.user)
context['range'] = 'Any'
# return JsonResponse({'range': 'Any', 'transactions': [model_to_dict(transaction) for transaction in query_set]})
elif not start_date:
query_set = Transaction.transaction_manager.retrieve_transaction(user=request.user, end_date=tuple(end_date.split('-')))
context['range'] = f'Before {end_date}'
# return JsonResponse({'range': f'Before {end_date}', 'transactions': [model_to_dict(transaction) for transaction in query_set]})
elif not end_date:
query_set = Transaction.transaction_manager.retrieve_transaction(user=request.user, start_date=tuple(start_date.split('-')))
context['range'] = f'After {start_date}'
# return JsonResponse({'range': f'After {start_date}', 'transactions': [model_to_dict(transaction) for transaction in query_set]})
else:
query_set = Transaction.transaction_manager.retrieve_transaction(user=request.user, start_date=tuple(start_date.split('-')), end_date=tuple(end_date.split('-')))
context['range'] = f'From {start_date} to {end_date}'
# return JsonResponse({'range': f'From {start_date} to {end_date}', 'transactions': [model_to_dict(transaction) for transaction in query_set]})
serialized_data = json.loads(serializers.serialize('json', query_set, use_natural_foreign_keys=True, use_natural_primary_keys=True))
for data in serialized_data:
transaction = data["fields"]
del transaction["user"]
transaction.update({"id": data["pk"]})
context['transactions'] = [data["fields"] for data in serialized_data]
#context['transactions'] = [model_to_dict(transaction) for transaction in query_set]
# print(request.user, context)
return JsonResponse(context, status=201)
return render(request, 'transaction.html', context)
else:
return redirect('/login')
@csrf_exempt
@basic_auth
def create_transaction(request):
if request.user.is_authenticated:
if request.method == 'POST':
form_data = CreateTransactionForm.map_fields(request.POST.dict())
form = CreateTransactionForm(request.user, **form_data)
if form.is_valid():
new_transaction = Transaction.transaction_manager.create_transaction(
user=request.user,
**form.cleaned_data
)
context = model_to_dict(new_transaction)
return JsonResponse(context, status=201)
else:
return JsonResponse({'message': 'Failed to create transaction', 'field_errors': CreateTransactionForm.map_fields(form.errors, reverse=True)}, status=422)
elif request.method == 'GET':
categories = Category.category_manager.get_categories(user=request.user).filter(is_active = True)
context = {'categories': [model_to_dict(category)['name'] for category in categories]}
return JsonResponse(context, status=201)
else:
return redirect('login')
@csrf_exempt
@basic_auth
def delete_transaction(request):
if request.user.is_authenticated:
if request.method == 'POST':
id = request.POST.get(TRANSACTION_ID_VAR)
try:
Transaction.transaction_manager.delete_transaction(user=request.user, id=id)
return JsonResponse({},status=201)
except Exception as e:
return JsonResponse({'non_field_errors': 'Failed to delete transaction'}, status=422)
else:
return redirect('login')
@csrf_exempt
@basic_auth
def update_transaction(request, id: str=None):
if request.user.is_authenticated:
if request.method == 'POST':
transaction_id = request.POST.get(TRANSACTION_ID_VAR)
print(f"post: {transaction_id}")
try:
form_data = CreateTransactionForm.map_fields(request.POST.dict())
form = CreateTransactionForm(request.user, **form_data)
if form.is_valid():
updated_transaction = Transaction.transaction_manager.update_transaction(
user=request.user,
id=transaction_id,
**form.cleaned_data
)
if not updated_transaction:
return JsonResponse({'non_field_errors': 'Invalid Transaction'}, status=422)
else:
return JsonResponse(model_to_dict(updated_transaction), status=201)
else:
return JsonResponse({'field_errors': CreateTransactionForm.map_fields(form.errors, reverse=True)}, status=422)
except Exception as e:
print(traceback.format_exc())
return JsonResponse({'non_field_errors': 'Failed to update transaction. Contact Administrator'}, status=500)
elif request.method == 'GET':
transaction_id = request.GET.get(TRANSACTION_ID_VAR)
print(f"get: {transaction_id}")
query_set = Transaction.transaction_manager.retrieve_transaction(user=request.user, id=transaction_id)
serialized_data = json.loads(serializers.serialize('json', query_set, use_natural_foreign_keys=True, use_natural_primary_keys=True))
for data in serialized_data:
transaction = data["fields"]
del transaction["user"]
transaction.update({"id": data["pk"]})
categories = Category.category_manager.get_categories(user=request.user)
context = {
'transaction': CreateTransactionForm.map_fields(serialized_data[0]['fields'], reverse=True),
'categories': [model_to_dict(category)['name'] for category in categories]
}
return JsonResponse(context, status=200)
return render(request, "", context)
else:
return redirect('login')
| edwinlowxh/CZ3002---Advanced-Software-Engineering | FinApp/Transaction/views.py | views.py | py | 12,542 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "constants.START_DATE_QUERY_PARAM",
"line_number": 48,
"usage_type": "argument"
},
{
"api_name": "constants.END_DATE_QUERY_PARAM",
"line_number": 49,
"usage_type": "argument"
},
{
"api_name": "constants.INCOME_TABLE_HEADER",
"line_number": 50,
"usage_type": ... |
34116401936 | '''
Detects a grid.
'''
import ujson
import cv2
from math import floor
from operator import itemgetter
from argparse import ArgumentParser
from pytesseract import image_to_string
parser = ArgumentParser(description = 'Detect grid.')
parser.add_argument(
"-f",
"--filename",
dest = "filename",
help = "filename prefix of images"
)
parser.add_argument(
"-d",
"--dev",
dest = "dev",
help = "run in development mode (preview image)",
action = "store_true"
)
parser.add_argument(
"--parts",
dest = "parts",
help = "which parts to read",
nargs = "+",
default = ["grid", "rides", "day", "roads"]
)
args = parser.parse_args()
dev = args.dev
parts = args.parts
image = cv2.imread("screen.png")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
(imageHeight, imageWidth, channels) = image.shape
def hasTheRightAspectRatio(boundingRect):
[x, y, w, h] = boundingRect
return round((w / h) * 10) == 12 or round((h / w) * 10) == 12
def isBigEnough(boundingRect):
[x, y, w, h] = boundingRect
return w > round(imageHeight / 30) and w < round(imageHeight / 5)
def topIsBrighterThanBottom(boundingRect, image):
[x, y, w, h] = boundingRect
centerX = x + (w / 2)
centerY = y + (h / 2)
centerYAbove = centerY - (h / 6)
centerYBelow = centerY + (h / 6)
pixelAbove = image[round(centerYAbove), round(centerX)]
pixelBelow = image[round(centerYBelow), round(centerX)]
return sum(pixelAbove) > sum(pixelBelow)
def isAHouse(boundingRect, image):
return (
hasTheRightAspectRatio(boundingRect) and
isBigEnough(boundingRect) and
topIsBrighterThanBottom(boundingRect, image)
)
def drawBoundingRects(image, contours):
for contour in contours:
[contourX, contourY, w, h] = contour
point1 = (contourX, contourY)
point2 = (contourX + w, contourY + h)
cv2.rectangle(image, point1, point2, (0, 0, 255), 2)
def cellFromBuilding(contour, offset, scale):
[contourX, contourY, w, h] = contour
[offsetX, offsetY] = offset
[scaleX, scaleY] = scale
newX = (contourX - (w * offsetX))
newY = (contourY - (h * offsetY))
newW = scale * w
return {"x": newX, "y": newY, "width": newW}
def topLeftCellFromSample(cell, imageWidth, imageHeight, borders):
x, y, width = itemgetter("x", "y", "width")(cell)
topBorder, leftBorder = borders
widthPadding = (imageWidth * leftBorder)
heightPadding = (imageHeight * topBorder)
usableWidth = x - widthPadding
usableHeight = y - heightPadding
newX = x - (floor(usableWidth / width) * width)
newY = y - (floor(usableHeight / width) * width)
return {"x": newX, "y": newY, "width": width}
def bottomRightCellFromSample(cell, imageWidth, imageHeight, borders):
x, y, width = itemgetter("x", "y", "width")(cell)
bottomBorder, rightBorder = borders
remainingWidth = imageWidth - x - width
remainingHeight = imageHeight - y - width
widthPadding = (imageWidth * rightBorder)
heightPadding = (imageHeight * bottomBorder)
usableWidth = remainingWidth - widthPadding
usableHeight = remainingHeight - heightPadding
newX = x + ((floor(usableWidth / width)) * width)
newY = y + ((floor(usableHeight / width)) * width)
return {"x": newX, "y": newY, "width": width}
def getCells(contour, cellSettings, borderSettings):
[contourX, contourY, w, h] = contour
if w < h:
w, h = h, w
[offset, scale] = cellSettings
sampleCell = cellFromBuilding([contourX, contourY, w, h], offset, scale)
[
topBorder,
bottomBorder,
leftBorder,
rightBorder,
] = borderSettings
topLeftBorder = [topBorder, leftBorder]
topLeftCell = topLeftCellFromSample(sampleCell, imageWidth, imageHeight, topLeftBorder)
bottomRightBorder = [bottomBorder, rightBorder]
bottomRightCell = bottomRightCellFromSample(sampleCell, imageWidth, imageHeight, bottomRightBorder)
return [sampleCell, topLeftCell, bottomRightCell]
def drawCell(image, cell):
x, y, width = itemgetter("x", "y", "width")(cell)
point1 = tuple(map(round, (x, y)))
point2 = tuple(map(round, (x + width, y + width)))
cv2.rectangle(image, point1, point2, (0, 0, 255), 2)
def drawAll(image, cellSettings, borderSettings, contours):
for contour in contours:
cells = getCells(contour, cellSettings, borderSettings)
for cell in cells:
drawCell(image, cell)
def arithmeticMean(numbers):
return sum(numbers) / len(numbers)
def reciprocal(number):
return 1 / number
def reciprocalSum(numbers):
return sum(list(map(reciprocal, numbers)))
def harmonicMean(numbers):
return len(numbers) / reciprocalSum(numbers)
def selectPart(collection, i):
return list(map(lambda c: c[i], collection))
def getAverageCells(cells):
keys = ["x", "y", "width"]
indices = [1, 2]
if len(cells) == 0:
return [
{
"x": 1,
"y": 1,
"width": 10
},
{
"x": 1,
"y": 1,
"width": 10
}
]
return [
{
key: harmonicMean(selectPart(selectPart(cells, i), key)) for key in keys
} for i in indices
]
def drawAverage(image, cellSettings, borderSettings, contours):
cells = list(map(lambda c: getCells(c, cellSettings, borderSettings), contours))
averageCells = getAverageCells(cells)
# print(cells)
# print(averageCells)
# drawCell(image, cells[0]) # sampleCell
drawCell(image, averageCells[0]) # average topLeftCell
drawCell(image, averageCells[1]) # average bottomRightCell
def getGrid(topLeftCell, bottomRightCell):
topLeftX, topLeftY, width = itemgetter("x", "y", "width")(topLeftCell)
bottomRightX, bottomRightY = itemgetter("x", "y", "width")(bottomRightCell)[:2]
gridWidth = (bottomRightX + width) - topLeftX
gridHeight = (bottomRightY + width) - topLeftY
return list(
map(
lambda y: list(
map(
lambda x: {"x": round(x), "y": round(y), "width": round(width)},
list(range(round(topLeftX), round(gridWidth + width), round(width)))
)
),
list(range(round(topLeftY), round(gridHeight + width), round(width)))
)
)
def drawGrid(image, grid):
for row in grid:
for cell in row:
drawCell(image, cell)
def linearConversion(oldRange, newRange, value):
[oldMin, oldMax] = oldRange
[newMin, newMax] = newRange
return (((value - oldMin) * (newMax - newMin)) / (oldMax - oldMin)) + newMin
sliderMax = 100
windowTitle = "slider"
defaultOffsetX = 120 / 1000
defaultOffsetY = 120 / 1000
defaultScaleX = 660 / 1000
defaultScaleY = 660 / 1000
defaultTopBorder = linearConversion([480, 1050], [15, 50], imageHeight) / 1000
defaultBottomBorder = linearConversion([480, 1050], [120, 140], imageHeight) / 1000
defaultLeftBorder = 30 / 1000
defaultRightBorder = 30 / 1000
borders = [
["top", defaultTopBorder],
["bottom", defaultBottomBorder],
["left", defaultLeftBorder],
["right", defaultRightBorder]
]
borderTrackbarNamesAndDefaults = list(map(lambda d: [f"{d[0]}Border", d[1]], borders))
def getSettings():
[
offsetX,
offsetY,
scaleX,
scaleY,
topBorder,
bottomBorder,
leftBorder,
rightBorder
] = [
defaultOffsetX,
defaultOffsetY,
defaultScaleX,
defaultScaleY,
defaultTopBorder,
defaultBottomBorder,
defaultLeftBorder,
defaultRightBorder
]
cellSettings = [[offsetX, offsetY], [scaleX, scaleY]]
borderSettings = [topBorder, bottomBorder, leftBorder, rightBorder]
return [cellSettings, borderSettings]
def cropCell(image, cell):
x, y, width = itemgetter("x", "y", "width")(cell)
crop = image.copy()[y:y + width, x:x + width]
crop = cv2.cvtColor(crop, cv2.COLOR_BGR2RGB)
(cropHeight, cropWidth, cropChannels) = crop.shape
targetWidth = 64
crop = cv2.resize(
crop,
(targetWidth, targetWidth),
cv2.INTER_AREA if cropWidth > targetWidth else cv2.INTER_LINEAR
)
return crop.tolist()
def cropRides(image):
return image.copy()[
round(imageHeight / 40):round(imageHeight / 15),
round(imageWidth - (imageWidth / 4)):round(imageWidth - imageWidth / 8.7)
]
def getRides(image):
tesseractConfig = ["eng", "--psm 7 --oem 1"]
croppedRides = cropRides(image)
(ret, croppedRides) = cv2.threshold(croppedRides, 127, 255, cv2.THRESH_TOZERO)
croppedRides = ~croppedRides
return image_to_string(croppedRides, *tesseractConfig).replace("\n", "").replace("\f", "")
def cropDay(image):
return image.copy()[
round(imageHeight / 40):round(imageHeight / 15),
round(imageWidth - (imageWidth / 12)):round(imageWidth - (imageWidth / 22))
]
def getDay(image):
tesseractConfig = ["eng", "--psm 7 --oem 1"]
croppedDay = cropDay(image)
(ret, croppedDay) = cv2.threshold(croppedDay, 127, 255, cv2.THRESH_TOZERO)
croppedDay = ~croppedDay
return image_to_string(croppedDay, *tesseractConfig).replace("\n", "").replace("\f", "")
def cropRoads(image):
return image.copy()[
round(imageHeight - (imageHeight / 16)):round(imageHeight - (imageHeight / 21.5)),
round((imageWidth / 2) + (imageWidth / 65)):round((imageWidth / 2) + (imageWidth / 33))
]
def getRoads(image):
tesseractConfig = ["eng", "--psm 7 --oem 1"]
croppedRoads = cropRoads(image)
(ret, croppedRoads) = cv2.threshold(croppedRoads, 127, 255, cv2.THRESH_TOZERO)
croppedRoads = ~croppedRoads
return image_to_string(croppedRoads, *tesseractConfig).replace("\n", "").replace("\f", "")
def cropGameOver(image):
return image.copy()[
round((imageHeight / 2) - (imageHeight / 3)):round((imageHeight / 2) - (imageHeight / 4)),
round((imageWidth / 2) - (imageWidth / 7)):round((imageWidth / 2) + (imageWidth / 7))
]
def getGameOver(image):
tesseractConfig = ["eng", "--psm 7 --oem 1"]
croppedGameOver = cropGameOver(image)
(ret, croppedGameOver) = cv2.threshold(croppedGameOver, 127, 255, cv2.THRESH_TOZERO)
croppedGameOver = ~croppedGameOver
return image_to_string(croppedGameOver, *tesseractConfig).replace("\n", "").replace("\f", "")
def cropUpgrade(image):
return image.copy()[
round(
(imageHeight / 2) - (imageHeight / 4.5)
):
round(
(imageHeight / 2) - (imageHeight / 7)
),
round(
(imageWidth / 2) - (imageWidth / 4.5)
):
round(
(imageWidth / 2) + (imageWidth / 10)
)
]
def getUpgrade(image):
tesseractConfig = ["eng", "--psm 7 --oem 1"]
croppedUpgrade = cropUpgrade(image)
(ret, croppedUpgrade) = cv2.threshold(croppedUpgrade, 127, 255, cv2.THRESH_TOZERO)
croppedUpgrade = ~croppedUpgrade
return image_to_string(croppedUpgrade, *tesseractConfig).replace("\n", "").replace("\f", "")
def detectGrid(value=0):
[cellSettings, borderSettings] = getSettings()
cells = list(map(lambda c: getCells(c, cellSettings, borderSettings), contours))
averageCells = getAverageCells(cells)
grid = getGrid(*averageCells)
if dev:
print(cellSettings)
print(borderSettings)
image = cv2.imread("screen.png")
# drawGrid(image, grid)
drawBoundingRects(image, contours)
[cellSettings, borderSettings] = getSettings()
drawAverage(image, cellSettings, borderSettings, contours)
drawAll(image, cellSettings, borderSettings, contours)
return grid
def getContours(image):
div = 32
quantized = image // div * div + div // 2
hsv = cv2.cvtColor(quantized, cv2.COLOR_BGR2HSV)
hsvThreshold = cv2.inRange(hsv, (0, 100, 150), (255, 255, 255))
blurred = cv2.GaussianBlur(hsvThreshold, (3, 3), 0)
canny = cv2.Canny(blurred, 120, 255, 1)
# Find contours
contours = cv2.findContours(canny, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
contours = list(map(cv2.boundingRect, contours))
contours = list(filter(lambda b: isAHouse(b, quantized), list(map(list, contours))))
return contours
if dev:
contours = getContours(image)
cv2.namedWindow(windowTitle)
grid = detectGrid()
drawGrid(image, grid)
drawBoundingRects(image, contours)
[cellSettings, borderSettings] = getSettings()
drawAverage(image, cellSettings, borderSettings, contours)
drawAll(image, cellSettings, borderSettings, contours)
cv2.imwrite(f"./images/test/{args.filename}.png", image)
else:
gameOverString = getGameOver(gray)
upgradeString = getUpgrade(gray)
if gameOverString == "Game Over":
print("GAME OVER")
elif upgradeString.startswith("Woche"):
print(upgradeString)
else:
if "grid" in parts:
contours = getContours(image)
[cellSettings, borderSettings] = getSettings()
cells = list(map(lambda c: getCells(c, cellSettings, borderSettings), contours))
grid = getGrid(*getAverageCells(cells))
gridData = list(
map(
lambda row: list(
map(
lambda cell: {key: cell[key] for key in ["x", "y"]} | {"pixels": cropCell(image, cell)},
row
)
),
grid
)
)
else:
gridData = None
if "rides" in parts:
rides = getRides(gray)
else:
rides = None
if "day" in parts:
day = getDay(gray)
else:
day = None
if "roads" in parts:
roads = getRoads(gray)
else:
roads = None
data = {
"grid": gridData,
"rides": rides,
"day": day,
"roads": roads
}
print(ujson.dumps(data))
| pumpncode/mimoai | read-game.py | read-game.py | py | 12,747 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
... |
74172105702 | import math
import centrosome.outline
import numpy
import numpy.testing
import pytest
import skimage.measure
import skimage.segmentation
import cellprofiler_core.image
import cellprofiler_core.measurement
from cellprofiler_core.constants.measurement import (
EXPERIMENT,
COLTYPE_FLOAT,
C_LOCATION,
)
import cellprofiler_core.object
import cellprofiler_core.pipeline
import cellprofiler_core.preferences
import cellprofiler_core.workspace
cellprofiler_core.preferences.set_headless()
import plugins.measureobjectintensitymultichannel as momc
IMAGE_NAME = "MyImage"
OBJECT_NAME = "MyObjects"
N_CHANNELS = 4
@pytest.fixture(scope="function")
def image():
return cellprofiler_core.image.Image()
@pytest.fixture(scope="function")
def measurements():
return cellprofiler_core.measurement.Measurements()
@pytest.fixture(scope="function")
def module():
module = momc.MeasureObjectIntensityMultichannel()
module.images_list.value = IMAGE_NAME
module.objects_list.value = OBJECT_NAME
return module
@pytest.fixture(scope="function")
def objects(image):
objects = cellprofiler_core.object.Objects()
objects.parent_image = image
return objects
@pytest.fixture(scope="function")
def workspace(image, measurements, module, objects):
image_set_list = cellprofiler_core.image.ImageSetList()
image_set = image_set_list.get_image_set(0)
image_set.add(IMAGE_NAME, image)
object_set = cellprofiler_core.object.ObjectSet()
object_set.add_objects(objects, OBJECT_NAME)
return cellprofiler_core.workspace.Workspace(
cellprofiler_core.pipeline.Pipeline(),
module,
image_set,
object_set,
measurements,
image_set_list,
)
def test_init():
x = momc.MeasureObjectIntensityMultichannel()
def assert_features_and_columns_match(measurements, module):
object_names = [
x
for x in measurements.get_object_names()
if x
not in (
"Image",
EXPERIMENT,
)
]
features = [
[f for f in measurements.get_feature_names(object_name) if f != "Exit_Status"]
for object_name in object_names
]
columns = module.get_measurement_columns(None)
assert sum([len(f) for f in features]) == len(columns)
for column in columns:
index = object_names.index(column[0])
assert column[1] in features[index]
assert column[2] == COLTYPE_FLOAT
def test_supplied_measurements(module):
"""Test the get_category / get_measurements, get_measurement_images functions"""
module.images_list.value = "MyImage"
module.objects_list.value = "MyObjects1, MyObjects2"
expected_categories = tuple(
sorted(
[
momc.INTENSITY,
C_LOCATION,
]
)
)
assert (
tuple(sorted(module.get_categories(None, "MyObjects1"))) == expected_categories
)
assert module.get_categories(None, "Foo") == []
measurements = module.get_measurements(None, "MyObjects1", momc.INTENSITY)
assert len(measurements) == len(momc.ALL_MEASUREMENTS)
measurements = module.get_measurements(None, "MyObjects1", C_LOCATION)
assert len(measurements) == len(momc.ALL_LOCATION_MEASUREMENTS)
assert all([m in momc.ALL_LOCATION_MEASUREMENTS for m in measurements])
assert (
module.get_measurement_images(
None,
"MyObjects1",
momc.INTENSITY,
momc.MAX_INTENSITY,
)
== ["MyImage"]
)
def test_get_measurement_columns(module):
    """Test the get_measurement_columns method: one FLOAT column per
    (object, image, channel, feature) combination."""
    module.images_list.value = "MyImage"
    module.objects_list.value = "MyObjects1, MyObjects2"
    module.nchannels.value = N_CHANNELS
    columns = module.get_measurement_columns(None)
    assert len(columns) == N_CHANNELS * 2 * (
        len(momc.ALL_MEASUREMENTS) + len(momc.ALL_LOCATION_MEASUREMENTS)
    )
    for column in columns:
        assert column[0] in ("MyObjects1", "MyObjects2")
        # BUG FIX: was ``assert column[2], COLTYPE_FLOAT`` which used the
        # expected value as the assert *message*, so any truthy coltype
        # passed; an equality comparison is intended (see the identical
        # check in assert_features_and_columns_match).
        assert column[2] == COLTYPE_FLOAT
        category = column[1].split("_")[0]
        assert category in (
            momc.INTENSITY,
            C_LOCATION,
        )
        if category == momc.INTENSITY:
            valid_measurements = momc.ALL_MEASUREMENTS
        else:
            valid_measurements = momc.ALL_LOCATION_MEASUREMENTS
        # feature names are "<measurement>_MyImage_c<channel>"
        assert column[1][column[1].find("_") + 1 :] in [
            m + "_MyImage" + f"_c{c+1}"
            for m in valid_measurements
            for c in range(N_CHANNELS)
        ]
def test_zero(image, measurements, module, objects, workspace):
    """Make sure we can process a blank image: with no labelled objects
    every per-object feature must come back empty."""
    image.pixel_data = numpy.zeros((10, 10, N_CHANNELS))
    objects.segmented = numpy.zeros((10, 10))
    module.nchannels.value = N_CHANNELS
    module.run(workspace)
    for category, features in (
        (
            momc.INTENSITY,
            momc.ALL_MEASUREMENTS,
        ),
        (
            C_LOCATION,
            momc.ALL_LOCATION_MEASUREMENTS,
        ),
    ):
        for meas_name in features:
            for c in range(N_CHANNELS):
                feature_name = "%s_%s_%s_c%s" % (category, meas_name, "MyImage", c + 1)
                data = measurements.get_current_measurement("MyObjects", feature_name)
                # BUG FIX: numpy.product was removed in NumPy 2.0;
                # ndarray.size is the equivalent element count.
                assert data.size == 0, (
                    "Got data for feature %s" % feature_name
                )
    assert_features_and_columns_match(measurements, module)
def test_masked(image, measurements, module, objects, workspace):
    """Make sure we can process a completely masked image

    Regression test of IMG-971
    """
    image.pixel_data = numpy.zeros((10, 10, N_CHANNELS))
    image.mask = numpy.zeros((10, 10), bool)
    objects.segmented = numpy.ones((10, 10), int)
    module.nchannels.value = N_CHANNELS
    module.run(workspace)
    for meas_name in momc.ALL_MEASUREMENTS:
        for c in range(N_CHANNELS):
            feature_name = "%s_%s_%s_c%s" % (
                momc.INTENSITY,
                meas_name,
                "MyImage",
                c + 1,
            )
            data = measurements.get_current_measurement("MyObjects", feature_name)
            # BUG FIX: numpy.product was removed in NumPy 2.0; use .size.
            assert data.size == 1
            # a fully masked object must yield NaN or 0 for every feature
            assert numpy.all(numpy.isnan(data) | (data == 0))
    assert_features_and_columns_match(measurements, module)
def test_one(image, measurements, module, objects, workspace):
    """Check measurements on a 3x3 square of 1's."""
    data = numpy.array(
        [
            [0, 0, 0, 0, 0, 0, 0],
            [0, 0, 1, 1, 1, 0, 0],
            [0, 0, 1, 1, 1, 0, 0],
            [0, 0, 1, 1, 1, 0, 0],
            [0, 0, 0, 0, 0, 0, 0],
        ]
    )
    image.pixel_data = data.astype(float)
    objects.segmented = data.astype(int)
    module.nchannels.value = 1
    module.run(workspace)
    # (category, feature, expected value) for a uniform 3x3 block of 1's:
    # 9 pixels total, 8 edge pixels, center of mass/intensity at (x=3, y=2).
    expected = (
        (momc.INTENSITY, momc.INTEGRATED_INTENSITY, 9),
        (momc.INTENSITY, momc.MEAN_INTENSITY, 1),
        (momc.INTENSITY, momc.STD_INTENSITY, 0),
        (momc.INTENSITY, momc.MIN_INTENSITY, 1),
        (momc.INTENSITY, momc.MAX_INTENSITY, 1),
        (momc.INTENSITY, momc.INTEGRATED_INTENSITY_EDGE, 8),
        (momc.INTENSITY, momc.MEAN_INTENSITY_EDGE, 1),
        (momc.INTENSITY, momc.STD_INTENSITY_EDGE, 0),
        (momc.INTENSITY, momc.MIN_INTENSITY_EDGE, 1),
        (momc.INTENSITY, momc.MAX_INTENSITY_EDGE, 1),
        (momc.INTENSITY, momc.MASS_DISPLACEMENT, 0),
        (momc.INTENSITY, momc.LOWER_QUARTILE_INTENSITY, 1),
        (momc.INTENSITY, momc.MEDIAN_INTENSITY, 1),
        (momc.INTENSITY, momc.UPPER_QUARTILE_INTENSITY, 1),
        (C_LOCATION, momc.LOC_CMI_X, 3),
        (C_LOCATION, momc.LOC_CMI_Y, 2),
    )
    for category, meas_name, value in expected:
        feature_name = "%s_%s_%s_c%s" % (category, meas_name, "MyImage", 1)
        data = measurements.get_current_measurement("MyObjects", feature_name)
        # BUG FIX: numpy.product was removed in NumPy 2.0; use .size.
        assert data.size == 1
        assert data[0] == value, "%s expected %f != actual %f" % (
            meas_name,
            value,
            data[0],
        )
def test_one_masked(image, measurements, module, objects, workspace):
    """Check measurements on a 3x3 square of 1's with a mask matching the
    object exactly — masking must not change any per-object feature."""
    img = numpy.array(
        [
            [0, 0, 0, 0, 0, 0, 0],
            [0, 0, 1, 1, 1, 0, 0],
            [0, 0, 1, 1, 1, 0, 0],
            [0, 0, 1, 1, 1, 0, 0],
            [0, 0, 0, 0, 0, 0, 0],
        ]
    )
    mask = img > 0
    image.pixel_data = img.astype(float)
    image.mask = mask
    objects.segmented = img.astype(int)
    module.run(workspace)
    expected = (
        (momc.INTEGRATED_INTENSITY, 9),
        (momc.MEAN_INTENSITY, 1),
        (momc.STD_INTENSITY, 0),
        (momc.MIN_INTENSITY, 1),
        (momc.MAX_INTENSITY, 1),
        (momc.INTEGRATED_INTENSITY_EDGE, 8),
        (momc.MEAN_INTENSITY_EDGE, 1),
        (momc.STD_INTENSITY_EDGE, 0),
        (momc.MIN_INTENSITY_EDGE, 1),
        (momc.MAX_INTENSITY_EDGE, 1),
        (momc.MASS_DISPLACEMENT, 0),
        (momc.LOWER_QUARTILE_INTENSITY, 1),
        (momc.MEDIAN_INTENSITY, 1),
        (momc.MAD_INTENSITY, 0),
        (momc.UPPER_QUARTILE_INTENSITY, 1),
    )
    for meas_name, value in expected:
        feature_name = "%s_%s_%s_c%s" % (momc.INTENSITY, meas_name, "MyImage", 1)
        data = measurements.get_current_measurement("MyObjects", feature_name)
        # BUG FIX: numpy.product was removed in NumPy 2.0; use .size.
        assert data.size == 1
        assert data[0] == value, "%s expected %f != actual %f" % (
            meas_name,
            value,
            data[0],
        )
def test_intensity_location(image, measurements, module, objects, workspace):
    """The single brightest pixel (value 2 before scaling, at row 2 / col 5)
    must be reported as the max-intensity location."""
    data = (
        numpy.array(
            [
                [0, 0, 0, 0, 0, 0, 0],
                [0, 1, 1, 1, 1, 1, 0],
                [0, 1, 1, 1, 1, 2, 0],
                [0, 1, 1, 1, 1, 1, 0],
                [0, 1, 1, 1, 1, 1, 0],
                [0, 1, 1, 1, 1, 1, 0],
                [0, 0, 0, 0, 0, 0, 0],
            ]
        ).astype(float)
        / 2.0
    )
    image.pixel_data = data
    # one object covering every non-zero pixel
    labels = (data != 0).astype(int)
    objects.segmented = labels
    module.run(workspace)
    for feature, value in (
        (momc.LOC_MAX_X, 5),
        (momc.LOC_MAX_Y, 2),
    ):
        feature_name = "%s_%s_%s_c%s" % (C_LOCATION, feature, "MyImage", 1)
        values = measurements.get_current_measurement(OBJECT_NAME, feature_name)
        assert len(values) == 1
        assert values[0] == value
def test_mass_displacement(image, measurements, module, objects, workspace):
    """Check the mass displacement of three squares."""
    # Three stacked 5x5 squares labelled 1, 2, 3, separated by blank rows
    # (equivalent to the original explicit 19x7 literal).
    labels = numpy.zeros((19, 7), int)
    labels[1:6, 1:6] = 1
    labels[7:12, 1:6] = 2
    labels[13:18, 1:6] = 3
    data = numpy.zeros(labels.shape, dtype=float)
    #
    # image # 1 has a single value in one of the corners
    # whose distance is sqrt(8) from the center
    #
    data[1, 1] = 1
    # image # 2 has a single value on the top edge
    # and should have distance 2
    #
    data[7, 3] = 1
    # image # 3 has a single value on the left edge
    # and should have distance 2
    data[15, 1] = 1
    image.pixel_data = data
    objects.segmented = labels
    module.run(workspace)
    feature_name = "%s_%s_%s_c%s" % (
        momc.INTENSITY,
        momc.MASS_DISPLACEMENT,
        "MyImage",
        1,
    )
    mass_displacement = measurements.get_current_measurement("MyObjects", feature_name)
    # BUG FIX: numpy.product was removed in NumPy 2.0; use .size.
    assert mass_displacement.size == 3
    numpy.testing.assert_almost_equal(mass_displacement[0], math.sqrt(8.0))
    numpy.testing.assert_almost_equal(mass_displacement[1], 2.0)
    numpy.testing.assert_almost_equal(mass_displacement[2], 2.0)
def test_mass_displacement_masked(image, measurements, module, objects, workspace):
    """Regression test IMG-766 - mass displacement of a masked image"""
    # Same geometry as test_mass_displacement: three stacked 5x5 squares.
    labels = numpy.zeros((19, 7), int)
    labels[1:6, 1:6] = 1
    labels[7:12, 1:6] = 2
    labels[13:18, 1:6] = 3
    data = numpy.zeros(labels.shape, dtype=float)
    #
    # image # 1 has a single value in one of the corners
    # whose distance is sqrt(8) from the center
    #
    data[1, 1] = 1
    # image # 2 has a single value on the top edge
    # and should have distance 2
    #
    data[7, 3] = 1
    # image # 3 has a single value on the left edge
    # and should have distance 2
    data[15, 1] = 1
    # mask exactly the labelled pixels — results must match the unmasked case
    mask = numpy.zeros(data.shape, bool)
    mask[labels > 0] = True
    image.pixel_data = data
    image.mask = mask
    objects.segmented = labels
    module.run(workspace)
    feature_name = "%s_%s_%s_c%s" % (
        momc.INTENSITY,
        momc.MASS_DISPLACEMENT,
        "MyImage",
        1,
    )
    mass_displacement = measurements.get_current_measurement("MyObjects", feature_name)
    # BUG FIX: numpy.product was removed in NumPy 2.0; use .size.
    assert mass_displacement.size == 3
    numpy.testing.assert_almost_equal(mass_displacement[0], math.sqrt(8.0))
    numpy.testing.assert_almost_equal(mass_displacement[1], 2.0)
    numpy.testing.assert_almost_equal(mass_displacement[2], 2.0)
def test_quartiles_uniform(image, measurements, module, objects, workspace):
    """test quartile values on a 250x250 square filled with uniform values"""
    labels = numpy.ones((250, 250), int)
    numpy.random.seed(0)
    data = numpy.random.uniform(size=(250, 250))
    image.pixel_data = data
    objects.segmented = labels
    module.run(workspace)
    # For Uniform(0,1): Q1 ~= .25, median ~= .5, MAD ~= .25, Q3 ~= .75
    # (checked to 2 decimal places for this sample size).
    feature_name = "%s_%s_%s_c%s" % (
        momc.INTENSITY,
        momc.LOWER_QUARTILE_INTENSITY,
        "MyImage",
        1,
    )
    data = measurements.get_current_measurement("MyObjects", feature_name)
    numpy.testing.assert_almost_equal(data[0], 0.25, 2)
    feature_name = "%s_%s_%s_c%s" % (
        momc.INTENSITY,
        momc.MEDIAN_INTENSITY,
        "MyImage",
        1,
    )
    data = measurements.get_current_measurement("MyObjects", feature_name)
    numpy.testing.assert_almost_equal(data[0], 0.50, 2)
    feature_name = "%s_%s_%s_c%s" % (momc.INTENSITY, momc.MAD_INTENSITY, "MyImage", 1)
    data = measurements.get_current_measurement("MyObjects", feature_name)
    numpy.testing.assert_almost_equal(data[0], 0.25, 2)
    feature_name = "%s_%s_%s_c%s" % (
        momc.INTENSITY,
        momc.UPPER_QUARTILE_INTENSITY,
        "MyImage",
        1,
    )
    data = measurements.get_current_measurement("MyObjects", feature_name)
    numpy.testing.assert_almost_equal(data[0], 0.75, 2)
def test_quartiles_one_pixel(image, module, objects, workspace):
    """Regression test a bug that occurs in an image with one pixel"""
    segmentation = numpy.zeros((10, 20))
    segmentation[2:7, 3:8] = 1
    # a second, single-pixel object
    segmentation[5, 15] = 2
    numpy.random.seed(0)
    pixels = numpy.random.uniform(size=(10, 20))
    image.pixel_data = pixels
    objects.segmented = segmentation
    # Crashes when pipeline runs in measureobjectintensity.py revision 7146
    module.run(workspace)
def test_quartiles_four_objects(image, measurements, module, objects, workspace):
    """test quartile values on a 250x250 square with 4 objects"""
    # Quadrant labels 1..4 (top-left, bottom-left, top-right, bottom-right).
    labels = numpy.ones((250, 250), int)
    labels[125:, :] += 1
    labels[:, 125:] += 2
    numpy.random.seed(0)
    data = numpy.random.uniform(size=(250, 250))
    #
    # Make the distributions center around .5, .25, 1/6 and .125
    #
    data /= labels.astype(float)
    image.pixel_data = data
    objects.segmented = labels
    module.run(workspace)
    # Quartiles of Uniform(0, 1/k) are 1/(4k), 1/(2k), 3/(4k) for object k.
    feature_name = "%s_%s_%s_c%s" % (
        momc.INTENSITY,
        momc.LOWER_QUARTILE_INTENSITY,
        "MyImage",
        1,
    )
    data = measurements.get_current_measurement("MyObjects", feature_name)
    numpy.testing.assert_almost_equal(data[0], 1.0 / 4.0, 2)
    numpy.testing.assert_almost_equal(data[1], 1.0 / 8.0, 2)
    numpy.testing.assert_almost_equal(data[2], 1.0 / 12.0, 2)
    numpy.testing.assert_almost_equal(data[3], 1.0 / 16.0, 2)
    feature_name = "%s_%s_%s_c%s" % (
        momc.INTENSITY,
        momc.MEDIAN_INTENSITY,
        "MyImage",
        1,
    )
    data = measurements.get_current_measurement("MyObjects", feature_name)
    numpy.testing.assert_almost_equal(data[0], 1.0 / 2.0, 2)
    numpy.testing.assert_almost_equal(data[1], 1.0 / 4.0, 2)
    numpy.testing.assert_almost_equal(data[2], 1.0 / 6.0, 2)
    numpy.testing.assert_almost_equal(data[3], 1.0 / 8.0, 2)
    feature_name = "%s_%s_%s_c%s" % (
        momc.INTENSITY,
        momc.UPPER_QUARTILE_INTENSITY,
        "MyImage",
        1,
    )
    data = measurements.get_current_measurement("MyObjects", feature_name)
    numpy.testing.assert_almost_equal(data[0], 3.0 / 4.0, 2)
    numpy.testing.assert_almost_equal(data[1], 3.0 / 8.0, 2)
    numpy.testing.assert_almost_equal(data[2], 3.0 / 12.0, 2)
    numpy.testing.assert_almost_equal(data[3], 3.0 / 16.0, 2)
    feature_name = "%s_%s_%s_c%s" % (momc.INTENSITY, momc.MAD_INTENSITY, "MyImage", 1)
    data = measurements.get_current_measurement("MyObjects", feature_name)
    numpy.testing.assert_almost_equal(data[0], 1.0 / 4.0, 2)
    numpy.testing.assert_almost_equal(data[1], 1.0 / 8.0, 2)
    numpy.testing.assert_almost_equal(data[2], 1.0 / 12.0, 2)
    numpy.testing.assert_almost_equal(data[3], 1.0 / 16.0, 2)
def test_median_intensity_masked(image, measurements, module, objects, workspace):
    """Median intensity must be computed over unmasked pixels only."""
    numpy.random.seed(37)
    labels = numpy.ones((10, 10), int)
    mask = numpy.ones((10, 10), bool)
    # mask off the left half of the image
    mask[:, :5] = False
    pixel_data = numpy.random.uniform(size=(10, 10, N_CHANNELS)).astype(numpy.float32)
    # masked pixels get a constant that would skew the median if counted
    pixel_data[~mask, :] = 1
    image.pixel_data = pixel_data
    image.mask = mask
    objects.segmented = labels
    # expected per-channel median of only the unmasked pixels
    expected = [
        numpy.sort(pixel_data[mask, c])[numpy.sum(mask) // 2] for c in range(N_CHANNELS)
    ]
    module.nchannels.value = N_CHANNELS
    module.run(workspace)
    assert isinstance(measurements, cellprofiler_core.measurement.Measurements)
    for c, exp in enumerate(expected):
        values = measurements.get_current_measurement(
            OBJECT_NAME,
            "_".join((momc.INTENSITY, momc.MEDIAN_INTENSITY, IMAGE_NAME, f"c{c+1}")),
        )
        assert len(values) == 1
        assert exp == values[0]
| BodenmillerGroup/ImcPluginsCP | tests/test_measureobjectintensitymultichannel.py | test_measureobjectintensitymultichannel.py | py | 20,306 | python | en | code | 10 | github-code | 36 | [
{
"api_name": "cellprofiler_core.image.preferences.set_headless",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "cellprofiler_core.image.preferences",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "cellprofiler_core.image",
"line_number": 24,
"u... |
198052217 | from django.views import generic
from digifarming.models import User, Staff, Rating, \
RequestType, Commodity, Supply, \
Order, OrderItem, UserTrackingMovements, HarvestDispatch,FacilityType, Facility, \
JobTitle, JobShift, ArrivalView, DepartureView, CancellationView, \
TransportCategory, TransportType, TransportItems, Client, ClientType, CustomerTransportation, \
CommodityCategory, CommodityType, CommodityMetric, Commodity, HarvestDispatch
# Hotel, ArrivalView, DepartureView, CancellationView, TodayBookingView, \
# BookingSummaryView, InhouseGuestView, OverBookingView, RoomsOccupiedView, MostUsedFacilityView, \
# LeastUsedFacilityView, AllOrdersListView, Laundry, LaundryType, LaundryItems, FacilityType, CleaningFacilityView, \
# CleaningRoomView, User, Workers, Facilities
# Alerts, AlertType
from operator import itemgetter
from django.db.utils import DatabaseError
from django import http
import json
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect, get_object_or_404
from django.utils import timezone
from django.contrib import messages
from .forms import JobTitleForm, JobShiftForm, StaffForm, UserUpdateForm, UserForm, LoginForm, \
FacilityForm, FacilityTypeForm, ClientTypeForm, ClientForm, CommodityCategoryForm, CommodityTypeForm, \
CommodityMetricForm, CommodityForm, TransportCategoryForm, TransportTypeForm, TransportItemsForm, \
CustomerTransportationForm, HarvestDispatchForm, OrderItemForm, OrderForm, SupplyForm
# Defining Generic views here.
def parse_update_params(request_params):
    """Split an AJAX update payload into ``(pk, field_updates)``.

    Pops the object's primary key and the CSRF token out of the payload.
    When the x-editable style ``name``/``value`` pair is present it is
    folded into a single ``{name: value}`` entry; all remaining keys pass
    through unchanged.

    Args:
        request_params: mutable dict built from ``request.POST.dict()``;
            consumed (mutated) by this function.

    Returns:
        Tuple of (pk, dict of model-field updates).

    Raises:
        KeyError: if ``pk`` is missing from the payload.
    """
    result = dict()
    pk = request_params.pop('pk')
    # tolerate payloads without a CSRF token (e.g. exempted endpoints)
    request_params.pop('csrfmiddlewaretoken', None)
    if 'name' in request_params and 'value' in request_params:
        result[request_params.pop('name')] = request_params.pop('value')
    result.update(**request_params)
    return pk, result
def _update_ajax(model_class, request):
    """Apply an AJAX field update to a single ``model_class`` instance.

    Parses the POSTed payload with ``parse_update_params``, applies the
    field updates, and returns the refreshed instance.  Returns None
    (implicitly) when the request is not an AJAX POST.
    """
    # BUG FIX: the method was compared against the typo 'POS,T', which can
    # never match, so no update could ever be applied.
    if request.method == 'POST' and request.is_ajax():
        pk, request_params = parse_update_params(request.POST.dict())
        model_class.objects.filter(pk=pk).update(**request_params)
        return model_class.objects.get(pk=pk)
# calling index page
# Listing all the arrivals in the system
class ArrivalListView(generic.ListView):
    """Paginated list of arrivals ordered by start date."""
    template_name = ''
    context_object_name = 'arrival_list'
    model = ArrivalView
    paginate_by = 10

    def get_context_data(self, *, object_list=None, **kwargs):
        context = super(ArrivalListView, self).get_context_data(**kwargs)
        request_params = self.request.GET.copy()
        if 'page' in request_params:
            del request_params['page']
        # Keep only query parameters that carry a non-empty value.
        # BUG FIX: a bare filter() object is always truthy, so the emptiness
        # check below never excluded anything and the template received a
        # one-shot iterator; materialize it to a list first.
        request_params = list(filter(itemgetter(1), request_params.items()))
        if request_params:
            context['request_params'] = request_params
        # NOTE(review): assumes the URL pattern supplies ``booking_id``;
        # raises KeyError otherwise — confirm against urls.py.
        context['booking_id'] = self.kwargs['booking_id']
        return context

    def get_queryset(self):
        # return ArrivalView.objects.filter(arrival_id=self.kwargs['arrival_id'])
        return ArrivalView.objects.order_by('start_date')
# Listing all the departures in the system
class DepartureListView(generic.ListView):
    """Paginated list of departures ordered by end date."""
    template_name = ''
    context_object_name = 'departure_list'
    model = DepartureView
    paginate_by = 10

    def get_context_data(self, *, object_list=None, **kwargs):
        context = super(DepartureListView, self).get_context_data(**kwargs)
        request_params = self.request.GET.copy()
        if 'page' in request_params:
            del request_params['page']
        # BUG FIX: filter() returns an always-truthy iterator; materialize
        # to a list so the emptiness check works and the template can
        # iterate it more than once.
        request_params = list(filter(itemgetter(1), request_params.items()))
        if request_params:
            context['request_params'] = request_params
        # NOTE(review): assumes the URL pattern supplies ``booking_id``.
        context['booking_id'] = self.kwargs['booking_id']
        return context

    def get_queryset(self):
        return DepartureView.objects.order_by('end_date')
# Listing all the cancellations in the system
class CancellationListView(generic.ListView):
    """Paginated list of cancellations ordered by booking date."""
    template_name = ''
    context_object_name = 'guest_list'
    model = CancellationView
    paginate_by = 10

    def get_context_data(self, *, object_list=None, **kwargs):
        context = super(CancellationListView, self).get_context_data(**kwargs)
        request_params = self.request.GET.copy()
        if 'page' in request_params:
            del request_params['page']
        # BUG FIX: filter() returns an always-truthy iterator; materialize
        # to a list so the emptiness check works and the template can
        # iterate it more than once.
        request_params = list(filter(itemgetter(1), request_params.items()))
        if request_params:
            context['request_params'] = request_params
        # NOTE(review): assumes the URL pattern supplies ``booking_id``.
        context['booking_id'] = self.kwargs['booking_id']
        return context

    def get_queryset(self):
        return CancellationView.objects.order_by('booking_date')
# Getting today's summary - all totals
# class TodaySummaryListView(generic.ListView):
# template_name = ''
# context_object_name = 'today_summary_list'
# model = TodayBookingView
# def get_queryset(self):
# return TodayBookingView.objects.all()
# creating a new check in to track users facility usage
def tracking_check_in_ajax(request, **kwargs):
    """Create a UserTrackingMovements check-in record from an AJAX POST.

    Returns 201 with the new record serialized as JSON, 400 on a database
    error, and None (implicitly) for non-AJAX / non-POST requests.
    """
    if request.method == 'POST':
        if request.is_ajax():
            request_params = request.POST.dict()
            print(request_params)
            try:
                check_in = UserTrackingMovements()
                check_in.user_tracking = request_params.get('user_id')
                check_in.user_tracking_facility = request_params.get('facility_id')
                check_in.user_tracking_status = request_params.get('status')
                check_in.save()
                # NOTE(review): the response reads ``facility_id``/``status``
                # but the attributes assigned above are
                # ``user_tracking_facility``/``user_tracking_status`` —
                # confirm the model defines the former or this raises
                # AttributeError after a successful save.
                return http.HttpResponse(json.dumps(
                    {'id': check_in.id, 'checked_in_facility': check_in.facility_id,
                     'status': check_in.status}), status=201)
            except DatabaseError as e:
                return http.HttpResponse(status=400, content="A problem occurred. Tracking Check in not created")
# Getting tracking trends - most used facilities
# class MostUsedFacilityListView(generic.ListView):
# template_name = ''
# context_object_name = 'facilities_most_used_list'
# model = MostUsedFacilityView
# def get_queryset(self):
# return MostUsedFacilityView.objects.all()
# Getting tracking trends - least used facilities
# class LeastUsedFacilityListView(generic.ListView):
# template_name = ''
# context_object_name = 'facilities_least_used_list'
# model = LeastUsedFacilityView
# def get_queryset(self):
# return LeastUsedFacilityView.objects.all()
# TODO
# Creating a new order
def add_order_ajax(request, **kwargs):
    """Render the add-order form; create an Order on a valid POST and
    continue to the add-order-item page."""
    if request.method != "POST":
        return render(request, 'pages/add_order.html', {'form': OrderForm()})
    form = OrderForm(request.POST)
    if form.is_valid():
        new_order = form.save(commit=False)
        new_order.order_created_by_id = request.user.id
        new_order.save()
        messages.success(request, 'Order was added successfully')
        return redirect('add-order-item-ajax')
    # invalid POST: re-render with the bound form so errors are shown
    return render(request, 'pages/add_order.html', {'form': form})
# List all orders
# class AllOrdersListView(generic.ListView):
# template_name = ''
# context_object_name = 'all_orders_list'
# model = AllOrdersListView
# def get_queryset(self):
# return AllOrdersListView.objects.all()
# Update order, cancell or process
def update_order_ajax(request, **kwargs):
    """AJAX endpoint: update an Order (e.g. cancel or process it).

    Returns 201 with the order's pk and status as JSON, or 400 on error
    or when the request is not an AJAX POST.
    """
    if request.method == 'POST' and request.is_ajax():
        try:
            order = _update_ajax(Order, request)
            return http.HttpResponse(
                json.dumps({'pk': order.id, 'status': order.order_status, }),
                status=201)
        except DatabaseError as e:
            return http.HttpResponse(status=400, content='An error occurred while processing your request')
    return http.HttpResponse(status=400)
# deleting a order
def delete_order_ajax(request, **kwargs):
    """AJAX endpoint: delete the Order whose pk is posted."""
    if not (request.method == 'POST' and request.is_ajax()):
        return
    try:
        target = Order.objects.get(pk=request.POST.get('pk'))
        removed_id = target.id
        target.delete()
        return http.HttpResponse(
            content='order <strong>{}</strong> has been successfully deleted'.format(removed_id),
            status=200)
    except DatabaseError:
        return http.HttpResponse(status=400, content='An error occurred while processing your request')
# Creating a new order item
def add_order_item_ajax(request, **kwargs):
    """Render the add-order-item form and create an OrderItem on valid POST."""
    if request.method != "POST":
        return render(request, 'pages/add_order_item.html', {'form': OrderItemForm()})
    form = OrderItemForm(request.POST)
    if form.is_valid():
        item = form.save(commit=False)
        item.save()
        messages.success(request, 'Order Item was added successfully')
        return redirect('add-order-item-ajax')
    # invalid POST: re-render with the bound form so errors are shown
    return render(request, 'pages/add_order_item.html', {'form': form})
# List all order items
class AllOrderItemListView(generic.ListView):
    """List every OrderItem."""
    template_name = ''
    context_object_name = 'all_order_list'
    model = OrderItem

    def get_queryset(self):
        # BUG FIX: previously returned AllOrderItemListView.objects.all();
        # a ListView subclass has no ``objects`` manager, so that raised
        # AttributeError at request time — query the model instead.
        return OrderItem.objects.all()
# updating order item
def update_order_item_ajax(request, **kwargs):
    """AJAX endpoint: update an OrderItem and echo it back as JSON.

    Returns 201 on success, 400 on error or non-AJAX-POST requests.
    """
    if request.method == 'POST' and request.is_ajax():
        try:
            order = _update_ajax(OrderItem, request)
            # NOTE(review): reads ``order_full_name`` off an OrderItem —
            # confirm the model defines it; other update views echo
            # model-specific fields.
            return http.HttpResponse(
                json.dumps({'pk': order.id, 'order_name': order.order_full_name, }),
                status=201)
        except DatabaseError as e:
            return http.HttpResponse(status=400, content='An error occurred while processing your request')
    return http.HttpResponse(status=400)
# deleting a order item
def delete_order_item_ajax(request, **kwargs):
    """AJAX endpoint: delete the OrderItem whose pk is posted.

    Returns 200 with a confirmation snippet, 400 on database error, and
    None (implicitly) for non-AJAX / non-POST requests.
    """
    if request.method == 'POST' and request.is_ajax():
        try:
            order = OrderItem.objects.get(pk=request.POST.get('pk'))
            order_id = order.id
            order.delete()
            return http.HttpResponse(
                content='order <strong>{}</strong> has been successfully deleted'.format(order_id), status=200)
        except DatabaseError as e:
            return http.HttpResponse(status=400, content='An error occurred while processing your request')
# Creating a new supply item
def add_supply_ajax(request, **kwargs):
    """Render the add-supply form and create a Supply on a valid POST.

    On success the creating user is recorded and the browser is redirected
    back to this view; on GET (or invalid POST, which falls through to a
    fresh form) the form page is rendered.
    """
    if request.method == "POST":
        form = SupplyForm(request.POST)
        if form.is_valid():
            supply = form.save(commit=False)
            supply.supply_created_by_id = request.user.id
            supply.save()
            messages.success(request, 'Supply was added successfully')
            return redirect('add-supply-ajax')
    else:
        form = SupplyForm()
    context = {
        'form': form
    }
    return render(request, 'pages/add_supply.html', context)
# List all supplies
class AllsupplyListView(generic.ListView):
    """List every Supply."""
    template_name = ''
    context_object_name = 'all_supplys_list'
    model = Supply

    def get_queryset(self):
        # BUG FIX: previously returned AllsupplyListView.objects.all();
        # the view class has no ``objects`` manager (AttributeError) —
        # query the Supply model instead.
        return Supply.objects.all()
# updating supplies
def update_supply_ajax(request, **kwargs):
    """AJAX endpoint: update a Supply and echo pk/commodity back as JSON."""
    if not (request.method == 'POST' and request.is_ajax()):
        return http.HttpResponse(status=400)
    try:
        supply = _update_ajax(Supply, request)
        payload = {'pk': supply.id, 'supply_commodity': supply.supply_commodity, }
        return http.HttpResponse(json.dumps(payload), status=201)
    except DatabaseError:
        return http.HttpResponse(status=400, content='An error occurred while processing your request')
# deleting supply
def delete_supply_ajax(request, **kwargs):
    """AJAX endpoint: delete the Supply whose pk is posted.

    Returns 200 with a confirmation snippet, 400 on database error, and
    None (implicitly) for non-AJAX / non-POST requests.
    """
    if request.method == 'POST' and request.is_ajax():
        try:
            supply = Supply.objects.get(pk=request.POST.get('pk'))
            supply_id = supply.id
            supply.delete()
            return http.HttpResponse(
                content='supply <strong>{}</strong> has been successfully deleted'.format(supply_id), status=200)
        except DatabaseError as e:
            return http.HttpResponse(status=400, content='An error occurred while processing your request')
# Creating a new staff
def add_worker_ajax(request, **kwargs):
    """AJAX endpoint: create a Staff record from posted form fields.

    Returns 201 with the new record's ids as JSON, 400 on a database
    error, and None (implicitly) for non-AJAX / non-POST requests.
    """
    if request.method == 'POST':
        if request.is_ajax():
            request_params = request.POST.dict()
            print(request_params)
            try:
                staff = Staff()
                staff.staff_id = request_params.get('worker_id')
                staff.staff_user = request_params.get('staff_user')
                staff.staff_job_title = request_params.get('staff_job_title')
                staff.staff_job_shift = request_params.get('staff_job_shift')
                staff.is_hr = request_params.get('is_hr')
                # staff.staff_created_by_id = request_params.get('staff_created_by')
                staff.save()
                return http.HttpResponse(
                    json.dumps({'id': staff.id, 'staff_id': staff.staff_id}),
                    status=201)
            except DatabaseError as e:
                # BUG FIX: the error message said "commodity not created" —
                # a copy-paste from another view; this endpoint creates staff.
                return http.HttpResponse(status=400, content="A problem occurred. Staff not created")
# List all staff
# class AllWorkersListView(generic.ListView):
# template_name = ''
# context_object_name = 'all_workers_list'
# model = Staff
# def get_queryset(self):
# return AllWorkersListView.objects.all()
# # updating staff
# def update_worker_ajax(request, **kwargs):
# if request.method == 'POST' and request.is_ajax():
# try:
# worker = _update_ajax(Staff, request)
# return http.HttpResponse(
# json.dumps({'pk': staff.id, 'worker_staff': staff.staff_user }),
# status=201)
# except DatabaseError as e:
# return http.HttpResponse(status=400, content='An error occurred while processing your request')
# return http.HttpResponse(status=400)
# # deleting a staff
# def delete_worker_ajax(request, **kwargs):
# if request.method == 'POST' and request.is_ajax():
# try:
# worker = Staff.objects.get(pk=request.POST.get('pk'))
# worker_id = worker.id
# worker.delete()
# return http.HttpResponse(
# content='staff <strong>{}</strong> has been successfully deleted'.format(worker_id), status=200)
# except DatabaseError as e:
# return http.HttpResponse(status=400, content='An error occurred while processing your request')
# Creating a new harvest dispatch
def add_harvest_dispatch_ajax(request, **kwargs):
    """Render the harvest-dispatch form and create a HarvestDispatch on
    a valid POST, then redirect back to this view."""
    if request.method == "POST":
        form = HarvestDispatchForm(request.POST)
        if form.is_valid():
            harvest_dispatch = form.save(commit=False)
            # NOTE(review): both the dispatch target and the creator are set
            # to the requesting user — confirm dispatch_to_staff_id really
            # should default to the requester.
            harvest_dispatch.dispatch_to_staff_id = request.user.id
            harvest_dispatch.dispatch_created_by_id = request.user.id
            harvest_dispatch.save()
            messages.success(request, 'Transport dispatch was added successfully')
            return redirect('add-harvest-dispatch-ajax')
    else:
        form = HarvestDispatchForm()
    context = {
        'form': form
    }
    return render(request, 'pages/add_harvest_dispatch.html', context)
# List all harvest dispatch
class AllHarvestDispatchListView(generic.ListView):
    """List every HarvestDispatch."""
    template_name = ''
    context_object_name = 'all_harvest_dispatch_list'
    model = HarvestDispatch

    def get_queryset(self):
        # BUG FIX: previously returned AllHarvestDispatchListView.objects.all();
        # the view class has no ``objects`` manager — query the model.
        return HarvestDispatch.objects.all()
# updating harvest dispatch
def update_harvest_dispatch_ajax(request, **kwargs):
    """AJAX endpoint: update a HarvestDispatch and echo it back as JSON.

    Returns 201 on success, 400 on error or non-AJAX-POST requests.
    """
    if request.method == 'POST' and request.is_ajax():
        try:
            harvest_dispatch = _update_ajax(HarvestDispatch, request)
            return http.HttpResponse(
                json.dumps({'pk': harvest_dispatch.id, 'dispatch_commodity': harvest_dispatch.dispatch_commodity, }),
                status=201)
        except DatabaseError as e:
            return http.HttpResponse(status=400, content='An error occurred while processing your request')
    return http.HttpResponse(status=400)
# deleting a harvest dispatch
def delete_harvest_dispatch_ajax(request, **kwargs):
    """AJAX endpoint: delete the HarvestDispatch whose pk is posted."""
    if not (request.method == 'POST' and request.is_ajax()):
        return
    try:
        target = HarvestDispatch.objects.get(pk=request.POST.get('pk'))
        removed_id = target.id
        target.delete()
        return http.HttpResponse(
            content='harvest_dispatch <strong>{}</strong> has been successfully deleted'.format(removed_id),
            status=200)
    except DatabaseError:
        return http.HttpResponse(status=400, content='An error occurred while processing your request')
# Creating a new customer transportation
def add_customer_transportation_ajax(request, **kwargs):
    """Render the customer-transportation form; create a record on a
    valid POST and redirect back to this view."""
    if request.method != "POST":
        return render(request, 'pages/add_customer_transportation.html',
                      {'form': CustomerTransportationForm()})
    form = CustomerTransportationForm(request.POST)
    if form.is_valid():
        record = form.save(commit=False)
        record.customer_created_by_id = request.user.id
        record.save()
        messages.success(request, 'Transport transportation was added successfully')
        return redirect('add-customer-transportation-ajax')
    # invalid POST: re-render with the bound form so errors are shown
    return render(request, 'pages/add_customer_transportation.html', {'form': form})
# List all customer transportation
class AllCustomerTransportationListView(generic.ListView):
    """List every CustomerTransportation."""
    template_name = ''
    context_object_name = 'all_customer_transportation_list'
    model = CustomerTransportation

    def get_queryset(self):
        # BUG FIX: previously queried the view class, which has no
        # ``objects`` manager — query the model instead.
        return CustomerTransportation.objects.all()
# updating customer transportation
def update_customer_transportation_ajax(request, **kwargs):
    """AJAX endpoint: update a CustomerTransportation record.

    Returns 201 with JSON on success, 400 on error or non-AJAX-POST.
    """
    if request.method == 'POST' and request.is_ajax():
        try:
            customer_transportation = _update_ajax(CustomerTransportation, request)
            # NOTE(review): reads ``customer_transportation_full_name`` —
            # confirm the model defines this attribute.
            return http.HttpResponse(
                json.dumps({'pk': customer_transportation.id, 'customer_transportation_name': customer_transportation.customer_transportation_full_name, }),
                status=201)
        except DatabaseError as e:
            return http.HttpResponse(status=400, content='An error occurred while processing your request')
    return http.HttpResponse(status=400)
# deleting a customer transportation
def delete_customer_transportation_ajax(request, **kwargs):
    """AJAX endpoint: delete the CustomerTransportation whose pk is posted.

    Returns 200 with a confirmation snippet, 400 on database error, and
    None (implicitly) for non-AJAX / non-POST requests.
    """
    if request.method == 'POST' and request.is_ajax():
        try:
            customer_transportation = CustomerTransportation.objects.get(pk=request.POST.get('pk'))
            customer_transportation_id = customer_transportation.id
            customer_transportation.delete()
            return http.HttpResponse(
                content='customer_transportation <strong>{}</strong> has been successfully deleted'.format(customer_transportation_id), status=200)
        except DatabaseError as e:
            return http.HttpResponse(status=400, content='An error occurred while processing your request')
# Creating a new transport items
def add_transport_items_ajax(request, **kwargs):
    """Render the transport-items form; create a TransportItems record on
    a valid POST and redirect back to this view."""
    if request.method != "POST":
        return render(request, 'pages/add_transport_items.html',
                      {'form': TransportItemsForm()})
    form = TransportItemsForm(request.POST)
    if form.is_valid():
        record = form.save(commit=False)
        record.transport_created_by_id = request.user.id
        record.save()
        messages.success(request, 'Transport item was added successfully')
        return redirect('add-transport-items-ajax')
    # invalid POST: re-render with the bound form so errors are shown
    return render(request, 'pages/add_transport_items.html', {'form': form})
# List all transport items
class AllTransportItemsListView(generic.ListView):
    """List every TransportItems record."""
    template_name = ''
    context_object_name = 'all_transport_items_list'
    model = TransportItems

    def get_queryset(self):
        # BUG FIX: previously queried the view class, which has no
        # ``objects`` manager — query the model instead.
        return TransportItems.objects.all()
# updating transport items
def update_transport_items_ajax(request, **kwargs):
    """AJAX endpoint: update a TransportItems record.

    Returns 201 with JSON on success, 400 on error or non-AJAX-POST.
    """
    if request.method == 'POST' and request.is_ajax():
        try:
            transport_items = _update_ajax(TransportItems, request)
            # NOTE(review): reads ``transport_items_full_name`` — confirm
            # the model defines this attribute.
            return http.HttpResponse(
                json.dumps({'pk': transport_items.id, 'transport_items_name': transport_items.transport_items_full_name, }),
                status=201)
        except DatabaseError as e:
            return http.HttpResponse(status=400, content='An error occurred while processing your request')
    return http.HttpResponse(status=400)
# deleting a transport items
def delete_transport_items_ajax(request, **kwargs):
    """AJAX endpoint: delete the TransportItems record whose pk is posted.

    Returns 200 with a confirmation snippet, 400 on database error, and
    None (implicitly) for non-AJAX / non-POST requests.
    """
    if request.method == 'POST' and request.is_ajax():
        try:
            transport_items = TransportItems.objects.get(pk=request.POST.get('pk'))
            # NOTE(review): reads ``transport_items_id`` while sibling delete
            # views read ``.id`` — confirm this field exists on the model.
            transport_items_id = transport_items.transport_items_id
            transport_items.delete()
            return http.HttpResponse(
                content='transport_items <strong>{}</strong> has been successfully deleted'.format(transport_items_id), status=200)
        except DatabaseError as e:
            return http.HttpResponse(status=400, content='An error occurred while processing your request')
# Creating a new transport type
def add_transport_type_ajax(request, **kwargs):
    """Render the transport-type form and create a TransportType on a
    valid POST, then redirect back to this view."""
    if request.method == "POST":
        form = TransportTypeForm(request.POST)
        if form.is_valid():
            transport_type = form.save(commit=False)
            transport_type.transport_type_created_by_id = request.user.id
            transport_type.save()
            messages.success(request, 'Transport type was added successfully')
            return redirect('add-transport-type-ajax')
    else:
        form = TransportTypeForm()
    context = {
        'form': form
    }
    return render(request, 'pages/add_transport_type.html', context)
# List all transport type
class AllTransportTypeListView(generic.ListView):
    """List every TransportType record.

    Bug fix: ``get_queryset`` previously queried the view class itself
    (``AllTransportTypeListView.objects``), which has no model manager and
    raises AttributeError at request time; it now queries the model.
    """
    template_name = ''  # TODO(review): template path was left empty in source
    context_object_name = 'all_transport_type_list'
    model = TransportType

    def get_queryset(self):
        return TransportType.objects.all()
# updating transport type
def update_transport_type_ajax(request, **kwargs):
if request.method == 'POST' and request.is_ajax():
try:
transport_type = _update_ajax(TransportType, request)
return http.HttpResponse(
json.dumps({'pk': transport_type.id, 'transport_type_name': transport_type.transport_type_full_name, }),
status=201)
except DatabaseError as e:
return http.HttpResponse(status=400, content='An error occurred while processing your request')
return http.HttpResponse(status=400)
# deleting a transport type
def delete_transport_type_ajax(request, **kwargs):
if request.method == 'POST' and request.is_ajax():
try:
transport_type = TransportType.objects.get(pk=request.POST.get('pk'))
transport_type_id = transport_type.transport_type_id
transport_type.delete()
return http.HttpResponse(
content='transport_type <strong>{}</strong> has been successfully deleted'.format(transport_type_id), status=200)
except DatabaseError as e:
return http.HttpResponse(status=400, content='An error occurred while processing your request')
# Creating a new transport category
def add_transport_category_ajax(request, **kwargs):
if request.method == "POST":
form = TransportCategoryForm(request.POST)
if form.is_valid():
transport_category = form.save(commit=False)
transport_category.transport_category_created_by_id = request.user.id
transport_category.save()
messages.success(request, 'Transport category was added successfully')
return redirect('add-transport-category-ajax')
else:
form = TransportCategoryForm()
context = {
'form': form
}
return render(request, 'pages/add_transport_category.html', context)
# List all transport category
class AllTransportCategoryListView(generic.ListView):
    """List every TransportCategory record.

    Bug fix: ``get_queryset`` previously queried the view class itself
    (``AllTransportCategoryListView.objects``), which has no model manager and
    raises AttributeError at request time; it now queries the model.
    """
    template_name = ''  # TODO(review): template path was left empty in source
    context_object_name = 'all_transport_category_list'
    model = TransportCategory

    def get_queryset(self):
        return TransportCategory.objects.all()
# updating transport category
def update_transport_category_ajax(request, **kwargs):
if request.method == 'POST' and request.is_ajax():
try:
transport_category = _update_ajax(TransportCategory, request)
return http.HttpResponse(
json.dumps({'pk': transport_category.id, 'transport_category_name': transport_category.transport_category_full_name, }),
status=201)
except DatabaseError as e:
return http.HttpResponse(status=400, content='An error occurred while processing your request')
return http.HttpResponse(status=400)
# deleting a transport category
def delete_transport_category_ajax(request, **kwargs):
if request.method == 'POST' and request.is_ajax():
try:
transport_category = TransportCategory.objects.get(pk=request.POST.get('pk'))
transport_category_id = transport_category.transport_category_id
transport_category.delete()
return http.HttpResponse(
content='transport_category <strong>{}</strong> has been successfully deleted'.format(transport_category_id), status=200)
except DatabaseError as e:
return http.HttpResponse(status=400, content='An error occurred while processing your request')
# Creating a new commodity
def add_commodity_ajax(request, **kwargs):
if request.method == "POST":
form = CommodityForm(request.POST)
if form.is_valid():
commodity = form.save(commit=False)
commodity.commodity_created_by_id = request.user.id
commodity.save()
messages.success(request, 'Commodity was added successfully')
return redirect('add-commodity-ajax')
else:
form = CommodityForm()
context = {
'form': form
}
return render(request, 'pages/add_commodity.html', context)
# List all commodity
class AllCommodityListView(generic.ListView):
    """List every Commodity record.

    Bug fix: ``get_queryset`` previously queried the view class itself
    (``AllCommodityListView.objects``), which has no model manager and raises
    AttributeError at request time; it now queries the model.
    """
    template_name = ''  # TODO(review): template path was left empty in source
    context_object_name = 'all_commodity_list'
    model = Commodity

    def get_queryset(self):
        return Commodity.objects.all()
# updating commodity
def update_commodity_ajax(request, **kwargs):
if request.method == 'POST' and request.is_ajax():
try:
commodity = _update_ajax(Commodity, request)
return http.HttpResponse(
json.dumps({'pk': commodity.id, 'commodity_name': commodity.commodity_full_name, }),
status=201)
except DatabaseError as e:
return http.HttpResponse(status=400, content='An error occurred while processing your request')
return http.HttpResponse(status=400)
# deleting a commodity
def delete_commodity_ajax(request, **kwargs):
if request.method == 'POST' and request.is_ajax():
try:
commodity = Commodity.objects.get(pk=request.POST.get('pk'))
commodity_id = commodity.commodity_id
commodity.delete()
return http.HttpResponse(
content='commodity <strong>{}</strong> has been successfully deleted'.format(commodity_id), status=200)
except DatabaseError as e:
return http.HttpResponse(status=400, content='An error occurred while processing your request')
# Creating a new commodity metric
def add_commodity_metric_ajax(request, **kwargs):
if request.method == "POST":
form = CommodityMetricForm(request.POST)
if form.is_valid():
commodity_metric = form.save(commit=False)
commodity_metric.commodity_metric_created_by_id = request.user.id
commodity_metric.save()
messages.success(request, 'Commodity metric was added successfully')
return redirect('add-commodity-metric-ajax')
else:
form = CommodityMetricForm()
context = {
'form': form
}
return render(request, 'pages/add_commodity_metric.html', context)
# List all commodity metric
class AllCommodityMetricListView(generic.ListView):
    """List every CommodityMetric record.

    Bug fix: ``get_queryset`` previously queried the view class itself
    (``AllCommodityMetricListView.objects``), which has no model manager and
    raises AttributeError at request time; it now queries the model.
    """
    template_name = ''  # TODO(review): template path was left empty in source
    context_object_name = 'all_commodity_metric_list'
    model = CommodityMetric

    def get_queryset(self):
        return CommodityMetric.objects.all()
# updating commodity metric
def update_commodity_metric_ajax(request, **kwargs):
if request.method == 'POST' and request.is_ajax():
try:
commodity_metric = _update_ajax(CommodityMetric, request)
return http.HttpResponse(
json.dumps({'pk': commodity_metric.id, 'commodity_metric_name': commodity_metric.commodity_metric_full_name, }),
status=201)
except DatabaseError as e:
return http.HttpResponse(status=400, content='An error occurred while processing your request')
return http.HttpResponse(status=400)
# deleting a commodity metric
def delete_commodity_metric_ajax(request, **kwargs):
if request.method == 'POST' and request.is_ajax():
try:
commodity_metric = CommodityMetric.objects.get(pk=request.POST.get('pk'))
commodity_metric_id = commodity_metric.commodity_metric_id
commodity_metric.delete()
return http.HttpResponse(
content='commodity_metric <strong>{}</strong> has been successfully deleted'.format(commodity_metric_id), status=200)
except DatabaseError as e:
return http.HttpResponse(status=400, content='An error occurred while processing your request')
# Creating a new commodity type
def add_commodity_type_ajax(request, **kwargs):
if request.method == "POST":
form = CommodityTypeForm(request.POST)
if form.is_valid():
commodity_type = form.save(commit=False)
commodity_type.commodity_type_created_by_id = request.user.id
commodity_type.save()
messages.success(request, 'Commodity type was added successfully')
return redirect('add-commodity-type-ajax')
else:
form = CommodityTypeForm()
context = {
'form': form
}
return render(request, 'pages/add_commodity_type.html', context)
# List all commodity type
class AllCommodityTypeListView(generic.ListView):
    """List every CommodityType record.

    Bug fix: ``get_queryset`` previously queried the view class itself
    (``AllCommodityTypeListView.objects``), which has no model manager and
    raises AttributeError at request time; it now queries the model.
    """
    template_name = ''  # TODO(review): template path was left empty in source
    context_object_name = 'all_commodity_type_list'
    model = CommodityType

    def get_queryset(self):
        return CommodityType.objects.all()
# updating commodity type
def update_commodity_type_ajax(request, **kwargs):
if request.method == 'POST' and request.is_ajax():
try:
commodity_type = _update_ajax(CommodityType, request)
return http.HttpResponse(
json.dumps({'pk': commodity_type.id, 'commodity_type_name': commodity_type.commodity_type_full_name, }),
status=201)
except DatabaseError as e:
return http.HttpResponse(status=400, content='An error occurred while processing your request')
return http.HttpResponse(status=400)
# deleting a commodity type
def delete_commodity_type_ajax(request, **kwargs):
if request.method == 'POST' and request.is_ajax():
try:
commodity_type = CommodityType.objects.get(pk=request.POST.get('pk'))
commodity_type_id = commodity_type.commodity_type_id
commodity_type.delete()
return http.HttpResponse(
content='commodity_type <strong>{}</strong> has been successfully deleted'.format(commodity_type_id), status=200)
except DatabaseError as e:
return http.HttpResponse(status=400, content='An error occurred while processing your request')
# Creating a new commodity category
def add_commodity_category_ajax(request, **kwargs):
if request.method == "POST":
form = CommodityCategoryForm(request.POST)
if form.is_valid():
commodity_category = form.save(commit=False)
commodity_category.commodity_category_created_by_id = request.user.id
commodity_category.save()
messages.success(request, 'Commodity Category was added successfully')
return redirect('add-commodity-category-ajax')
else:
form = CommodityCategoryForm()
context = {
'form': form
}
return render(request, 'pages/add_commodity_category.html', context)
# List all commodity category
class AllCommodityCategoryListView(generic.ListView):
    """List every CommodityCategory record.

    Bug fix: ``get_queryset`` previously queried the view class itself
    (``AllCommodityCategoryListView.objects``), which has no model manager and
    raises AttributeError at request time; it now queries the model.
    """
    template_name = ''  # TODO(review): template path was left empty in source
    context_object_name = 'all_commodity_category_list'
    model = CommodityCategory

    def get_queryset(self):
        return CommodityCategory.objects.all()
# updating commodity category
def update_commodity_category_ajax(request, **kwargs):
if request.method == 'POST' and request.is_ajax():
try:
commodity_category = _update_ajax(CommodityCategory, request)
return http.HttpResponse(
json.dumps({'pk': commodity_category.id, 'commodity_category_name': commodity_category.commodity_category_full_name, }),
status=201)
except DatabaseError as e:
return http.HttpResponse(status=400, content='An error occurred while processing your request')
return http.HttpResponse(status=400)
# deleting a commodity category
def delete_commodity_category_ajax(request, **kwargs):
if request.method == 'POST' and request.is_ajax():
try:
commodity_category = CommodityCategory.objects.get(pk=request.POST.get('pk'))
commodity_category_id = commodity_category.commodity_category_id
commodity_category.delete()
return http.HttpResponse(
content='commodity_category <strong>{}</strong> has been successfully deleted'.format(commodity_category_id), status=200)
except DatabaseError as e:
return http.HttpResponse(status=400, content='An error occurred while processing your request')
# Creating a new client
def add_client_ajax(request):
    """Render the add-client form and create a Client on a valid POST.

    On success the user is redirected back to this view; on ValueError /
    KeyError during save an error message is flashed and the bound form is
    re-rendered.
    """
    if request.method == "POST":
        form = ClientForm(request.POST)
        if form.is_valid():
            try:
                client = form.save(commit=False)
                client.client_created_by_id = request.user.id
                client.save()
                messages.success(request, 'client was added successfully')
                # Bug fix: the original redirected to 'add-client-type-ajax'
                # (copy/paste from the client-type view); return to the
                # client form instead, as the commented-out reverse() hinted.
                return redirect('add-client-ajax')
            except (ValueError, KeyError):
                messages.add_message(request, messages.ERROR, 'Invalid values encountered, Server Error')
    else:
        form = ClientForm()
    context = {
        'form': form
    }
    return render(request, 'pages/add_client.html', context)
# List all Client
class AllClientListView(generic.ListView):
    """List every Client record.

    Bug fix: ``get_queryset`` previously queried the view class itself
    (``AllClientListView.objects``), which has no model manager and raises
    AttributeError at request time; it now queries the model.
    """
    template_name = ''  # TODO(review): template path was left empty in source
    context_object_name = 'all_client_list'
    model = Client

    def get_queryset(self):
        return Client.objects.all()
# updating client
def update_client_ajax(request, **kwargs):
if request.method == 'POST' and request.is_ajax():
try:
client = _update_ajax(Client, request)
return http.HttpResponse(
json.dumps({'pk': client.id, 'client_name': client.client_full_name, }),
status=201)
except DatabaseError as e:
return http.HttpResponse(status=400, content='An error occurred while processing your request')
return http.HttpResponse(status=400)
# deleting a client
def delete_client_ajax(request, **kwargs):
if request.method == 'POST' and request.is_ajax():
try:
client = Client.objects.get(pk=request.POST.get('pk'))
client_id = client.client_id
client.delete()
return http.HttpResponse(
content='client <strong>{}</strong> has been successfully deleted'.format(client_id), status=200)
except DatabaseError as e:
return http.HttpResponse(status=400, content='An error occurred while processing your request')
# Creating a new client type
def add_client_type_ajax(request):
if request.method == "POST":
form = ClientTypeForm(request.POST)
if form.is_valid():
try:
client_type = form.save(commit=False)
client_type.client_type_created_by_id = request.user.id
client_type.save()
messages.success(request, 'client type was added successfully')
return redirect('add-client-type-ajax')
# return reverse('digifarming:add-client-ajax')
except (ValueError, KeyError):
messages.add_message(request, messages.ERROR, 'Invalid values encountered, Server Error')
else:
form = ClientTypeForm()
context = {
'form': form
}
return render(request, 'pages/add_client_type.html', context)
# List all client types
class AllClientTypeListView(generic.ListView):
    """List every ClientType record.

    Bug fix: ``get_queryset`` previously queried the view class itself
    (``AllClientTypeListView.objects``), which has no model manager and raises
    AttributeError at request time; it now queries the model.
    """
    template_name = ''  # TODO(review): template path was left empty in source
    context_object_name = 'all_client_type_list'
    model = ClientType

    def get_queryset(self):
        return ClientType.objects.all()
# updating client type
def update_client_type_ajax(request, **kwargs):
if request.method == 'POST' and request.is_ajax():
try:
client_type = _update_ajax(ClientType, request)
return http.HttpResponse(
json.dumps({'pk': client_type.id, 'client_type': client_type.client_type, }),
status=201)
except DatabaseError as e:
return http.HttpResponse(status=400, content='An error occurred while processing your request')
return http.HttpResponse(status=400)
# deleting client type
def delete_client_type_ajax(request, **kwargs):
if request.method == 'POST' and request.is_ajax():
try:
client = ClientType.objects.get(pk=request.POST.get('pk'))
client_type_id = client.client_type_id
client.delete()
return http.HttpResponse(
content='client type <strong>{}</strong> has been successfully deleted'.format(client_type_id), status=200)
except DatabaseError as e:
return http.HttpResponse(status=400, content='An error occurred while processing your request')
# Creating a new facility type
def add_facility_type_ajax(request, **kwargs):
if request.method == "POST":
form = FacilityTypeForm(request.POST)
if form.is_valid():
facility_type = form.save(commit=False)
facility_type.facility_type_created_by_id = request.user.id
facility_type.save()
messages.success(request, 'Facility type was added successfully')
return redirect('add-facility-type-ajax')
else:
form = FacilityTypeForm()
context = {
'form': form
}
return render(request, 'pages/add_facility_type.html', context)
# List all Facility types
class AllFacilityTypeListView(generic.ListView):
    """List every FacilityType record.

    Bug fix: ``get_queryset`` previously queried the view class itself
    (``AllFacilityTypeListView.objects``), which has no model manager and
    raises AttributeError at request time; it now queries the model.
    """
    template_name = ''  # TODO(review): template path was left empty in source
    context_object_name = 'all_facility_type_list'
    model = FacilityType

    def get_queryset(self):
        return FacilityType.objects.all()
# updating facility type
def update_facility_type_ajax(request, **kwargs):
if request.method == 'POST' and request.is_ajax():
try:
facility = _update_ajax(FacilityType, request)
return http.HttpResponse(
json.dumps({'pk': facility.id, 'facility_type': facility.facility_type, }),
status=201)
except DatabaseError as e:
return http.HttpResponse(status=400, content='An error occurred while processing your request')
return http.HttpResponse(status=400)
# deleting facility type
def delete_facility_type_ajax(request, **kwargs):
if request.method == 'POST' and request.is_ajax():
try:
facility = FacilityType.objects.get(pk=request.POST.get('pk'))
facility_type_id = facility.facility_type_id
facility.delete()
return http.HttpResponse(
content='facility type <strong>{}</strong> has been successfully deleted'.format(facility_type_id), status=200)
except DatabaseError as e:
return http.HttpResponse(status=400, content='An error occurred while processing your request')
# Creating a new facility
@login_required
def add_facility_ajax(request, **kwargs):
if request.method == "POST":
form = FacilityForm(request.POST)
if form.is_valid():
facility = form.save(commit=False)
facility.created_by_id = request.user.id
facility.save()
messages.success(request, 'Facility was added successfully')
return redirect('add-facility-ajax')
else:
form = FacilityForm()
context = {
'form': form
}
return render(request, 'pages/add_facility.html', context)
# List all Facility
class AllFacilityListView(generic.ListView):
    """List every Facility record.

    Bug fix: ``get_queryset`` previously queried the view class itself
    (``AllFacilityListView.objects``), which has no model manager and raises
    AttributeError at request time; it now queries the model.
    """
    template_name = ''  # TODO(review): template path was left empty in source
    context_object_name = 'all_facility_list'
    model = Facility

    def get_queryset(self):
        return Facility.objects.all()
# updating facility
def update_facility_ajax(request, **kwargs):
if request.method == 'POST' and request.is_ajax():
try:
facility = _update_ajax(Facility, request)
return http.HttpResponse(
json.dumps({'pk': facility.id, 'facility_name': facility.facility_name, }),
status=201)
except DatabaseError as e:
return http.HttpResponse(status=400, content='An error occurred while processing your request')
return http.HttpResponse(status=400)
# deleting a facility
def delete_facility_ajax(request, **kwargs):
if request.method == 'POST' and request.is_ajax():
try:
facility = Facility.objects.get(pk=request.POST.get('pk'))
facility_id = facility.facility_id
facility.delete()
return http.HttpResponse(
content='facility <strong>{}</strong> has been successfully deleted'.format(facility_id), status=200)
except DatabaseError as e:
return http.HttpResponse(status=400, content='An error occurred while processing your request')
# TODO
# Creating a new rating
def add_rating_ajax(request, **kwargs):
    """Create a Rating from an AJAX POST (user_id, rating, comment).

    Returns 201 with the new id/rating as JSON on success, 400 on a
    database error or when the request is not an AJAX POST.
    """
    if request.method == 'POST' and request.is_ajax():
        request_params = request.POST.dict()
        try:
            # Bug fix: the original did ``rate = Rating`` (binding the model
            # *class*), so the attribute writes below mutated the class and
            # ``rate.save()`` could never persist a row. Instantiate it.
            rate = Rating()
            rate.user_id = request_params.get('user_id')
            rate.rating = request_params.get('rating')
            rate.comment = request_params.get('comment')
            rate.save()
            return http.HttpResponse(
                json.dumps({'id': rate.id, 'rating': rate.rating}),
                status=201)
        except DatabaseError:
            # Bug fix: error text said "commodity" (copy/paste).
            return http.HttpResponse(status=400, content="A problem occurred. rating not created")
    # Answer non-AJAX/GET requests explicitly instead of returning None.
    return http.HttpResponse(status=400)
# List all ratings
class AllRatingsListView(generic.ListView):
    """List every Rating record.

    Bug fixes: the original set ``model = Facility`` (copy/paste from the
    facility views) and queried ``AllRatingsListView.objects`` — the view
    class has no manager. Both now target the Rating model.
    """
    template_name = ''  # TODO(review): template path was left empty in source
    context_object_name = 'all_ratings_list'
    model = Rating

    def get_queryset(self):
        return Rating.objects.all()
# updating a rating
def update_rating_ajax(request, **kwargs):
if request.method == 'POST' and request.is_ajax():
try:
rate = _update_ajax(Rating, request)
return http.HttpResponse(
json.dumps({'pk': rate.id }),
status=201)
except DatabaseError as e:
return http.HttpResponse(status=400, content='An error occurred while processing your request')
return http.HttpResponse(status=400)
# deleting a rating
def delete_rating_ajax(request, **kwargs):
if request.method == 'POST' and request.is_ajax():
try:
rate = Rating.objects.get(pk=request.POST.get('pk'))
rating_id = rate.rating_id
rate.delete()
return http.HttpResponse(
content='rating <strong>{}</strong> has been successfully deleted'.format(rating_id), status=200)
except DatabaseError as e:
return http.HttpResponse(status=400, content='An error occurred while processing your request')
# TODO change the renders and the redirects
def user_register(request):
    """Show the registration form; create the account on a valid POST."""
    if request.method != "POST":
        # Initial page load: render an unbound form.
        return render(request, 'pages/register.html', {'form': UserForm()})

    form = UserForm(request.POST)
    if not form.is_valid():
        # Re-render the bound form so field errors are displayed.
        return render(request, 'pages/register.html', {'form': form})

    new_user = form.save(commit=False)
    new_user.save()
    messages.success(request, 'Registered successfully')
    return redirect('user_login')
def user_login(request):
    """Authenticate a user from the login form and start a session."""
    if request.method == 'POST':
        form = LoginForm(request.POST)
        if form.is_valid():
            email = form.cleaned_data.get('email')
            password = form.cleaned_data.get('password')
            # Check credentials against the e-mail based auth backend.
            user = authenticate(email=email, password=password)
            if user is not None:
                login(request, user)
                return redirect('add-facility-ajax')
            else:
                # Attach the error to whichever field actually failed.
                # NOTE(review): telling the visitor whether the e-mail
                # exists enables account enumeration — confirm acceptable.
                try:
                    user = User.objects.get(email=email)
                    form.add_error('password', "invalid password")
                except User.DoesNotExist:
                    form.add_error('email', "invalid email address")
    else:
        form = LoginForm()
    # Falls through here on GET or any failed POST: re-render the form.
    return render(request, 'pages/login.html', {'form': form})
def user_logout(request):
    """End the current session and return to the login page."""
    logout(request)
    return redirect('user_login')
# @login_required
def add_job_title(request):
if request.method == "POST":
form = JobTitleForm(request.POST)
if form.is_valid():
job_title = form.save(commit=False)
job_title.job_created_by_id = request.user.id
job_title.save()
messages.success(request, 'Job title was created successfully')
return redirect('add-job-title')
else:
form = JobTitleForm()
context = {
'form': form
}
return render(request, 'pages/add_job_title.html', context)
@login_required
def all_job_title(request):
job_titles = JobTitle.objects.select_related().filter(job_title_status=1)
context = {
'job_titles': job_titles
}
return render(request, 'pages/all_job_titles.html', context)
# def job_title_details(request, job_title_id):
# job_title = get_object_or_404(JobTitle, id=job_title_id)
# staff = Staff.objects.filter(staff_job_title=job_title, staff_user__status=1)
# context = {
# 'job_title': job_title,
# 'staff': staff
# }
# return render(request, 'pages/job_title_details.html', context)
@login_required
def update_job_title(request, job_title_id):
job_title = JobTitle.objects.get(id=job_title_id)
if request.method == "POST":
form = JobTitleForm(request.POST, instance=job_title)
if form.is_valid():
job_title = form.save()
messages.success(request, 'Job title was updated successfully')
return redirect('update_job_title', job_title_id=job_title_id)
else:
form = JobTitleForm(instance=job_title)
context = {
'job_title': job_title,
'form': form
}
return render(request, 'pages/update_job_title.html', context)
@login_required
def deactivate_job_title(request, job_title_id):
job_title = JobTitle.objects.get(id=job_title_id)
job_title.job_title_status = 0
job_title.save(update_fields=['job_title_status'])
messages.add_message(request, messages.SUCCESS, 'Job title removed successfully')
return redirect('all_job_titles')
@login_required
def add_job_shift(request):
if request.method == "POST":
form = JobShiftForm(request.POST)
if form.is_valid():
job_shift = form.save(commit=False)
job_shift.created_by_id = request.user.id
job_shift.save()
messages.success(request, 'Job shift was added successfully')
return redirect('add-job-shift')
else:
form = JobShiftForm()
context = {
'form': form
}
return render(request, 'pages/add_job_shift.html', context)
@login_required
def all_job_shifts(request):
job_shifts = JobShift.objects.filter(job_shift_status=1)
context = {
'job_shifts': job_shifts
}
return render(request, 'pages/all_job_shifts.html', context)
@login_required
def update_job_shift(request, job_shift_id):
job_shift = JobShift.objects.get(id=job_shift_id)
if request.method == "POST":
form = JobShiftForm(request.POST, instance=job_shift)
if form.is_valid():
job_shift = form.save()
messages.success(request, 'Job shift was updated successfully')
return redirect('update_job_shift', job_shift_id=job_shift_id)
else:
form = JobShiftForm(instance=job_shift)
context = {
'job_shift': job_shift,
'form': form
}
return render(request, 'pages/update_job_shift.html', context)
@login_required
def deactivate_job_shift(request, job_shift_id):
job_shift = JobShift.objects.get(id=job_shift_id)
job_shift.job_shift_status = 0
job_shift.save(update_fields=['job_shift_status'])
messages.add_message(request, messages.SUCCESS, 'Job shift removed successfully')
return redirect('all_job_shifts')
# @login_required
def add_staff(request):
    """Render the add-staff form and create a Staff record on a valid POST.

    Bug fix: the original only bound ``user_form`` in the GET branch, so a
    POST with an invalid ``staff_form`` fell through to ``context`` and
    raised UnboundLocalError. An unbound UserForm is now always available
    for the template (the user-creation half of this view is currently
    commented out upstream).
    """
    user_form = UserForm()
    if request.method == "POST":
        staff_form = StaffForm(request.POST)
        if staff_form.is_valid():
            # Save staff specific details.
            staff = staff_form.save(commit=False)
            staff.staff_created_by_id = request.user.id
            staff.save()
            messages.success(request, 'The staff has been successfully created')
            return redirect('add-staff')
    else:
        staff_form = StaffForm()
    context = {
        'user_form': user_form,
        'staff_form': staff_form
    }
    return render(request, 'pages/add_staff.html', context)
@login_required
def current_staff(request):
staff = Staff.objects.select_related().filter(staff_user__status=1)
context = {
'staff': staff
}
return render(request, 'pages/current_staff.html', context)
@login_required
def past_staff(request):
staff = Staff.objects.select_related().filter(staff_user__status=0)
context = {'staff': staff}
return render(request, 'pages/past_staff.html', context)
@login_required
def update_staff(request, staff_id):
staff = Staff.objects.get(id=staff_id)
user = User.objects.get(id=staff.staff_user.id)
if request.method == "POST":
user_form = UserUpdateForm(request.POST, instance=user)
staff_form = StaffForm(request.POST, instance=staff)
if user_form.is_valid() and staff_form.is_valid():
user = user_form.save()
staff = staff_form.save()
messages.success(request, 'Staff was updated successfully')
return redirect('update_staff', staff_id=staff_id)
else:
user_form = UserUpdateForm(instance=user)
staff_form = StaffForm(instance=staff)
context = {
'user_form': user_form,
'staff_form': staff_form,
'staff': staff
}
return render(request, 'pages/update_staff.html', context)
@login_required
def deactivate_staff(request, staff_id):
    """Soft-remove a staff member: flag the user inactive and stamp an end date."""
    # Update in user table
    # NOTE(review): the user is fetched with ``id=staff_id`` while the staff
    # row below is fetched with ``staff_user=staff_id`` — so ``staff_id`` is
    # treated as a *User* pk here, whereas ``update_staff`` treats it as a
    # Staff pk. Confirm which identifier callers actually pass.
    user = User.objects.get(id=staff_id)
    user.status = 0
    user.save(update_fields=['status'])
    # Update in staff table
    staff = Staff.objects.get(staff_user=staff_id)
    staff.staff_end_date = timezone.now()
    staff.save(update_fields=['staff_end_date'])
    messages.add_message(request, messages.SUCCESS, 'Staff was removed successfully')
    return redirect('current_staff')
def all_visualizations(request):
context = {'name': 'Visualization'}
return render(request, 'pages/visualization.html', context) | gatirobi/digifarming | digifarming/digifarming/views.py | views.py | py | 53,254 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.views.generic.ListView",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "django.views.generic",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "digifarming.models.ArrivalView",
"line_number": 62,
"usage_type": "name"
},
{... |
19288984909 | import tensorflow as tf
import numpy as np
from dataset import get_dataset, get_rotation_augmentor, get_translation_augmentor
from model import build_model
AUTOTUNE = tf.data.experimental.AUTOTUNE
dataset, num_classes = get_dataset()
model = build_model(num_classes)
model.load_weights('./saved_weights/weights')
rng = np.random.RandomState()
test_dataset = dataset.concatenate(
dataset.repeat(500)
.map(get_rotation_augmentor(rng), num_parallel_calls=AUTOTUNE)
.map(get_translation_augmentor(rng), num_parallel_calls=AUTOTUNE))
model.evaluate(test_dataset.batch(8))
# print(model.predict(test_dataset.batch(8).take(1))) | wdecay/ShapeClassification | test_model.py | test_model.py | py | 640 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tensorflow.data",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "dataset.get_dataset",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "model.build_model",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "model.load_w... |
6163296593 | '''
Classe wordCloudGenerator que a partir de um conjunto de token gera uma nuvem de palavra
Argumentos:
text: lista de token (preferencilmente geradas pela classe pdfReader) (OBRIGATORIO)
max_font_size: tamanho maximo das palavras na nuvem
max_words: numero maximo de palavras na nuvem
background_color: color de fundo da nuvem
'''
import numpy as np
import matplotlib.pyplot as plt
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
class wordCloud(object):
def __init__(self, text, max_font_size=50, max_words=100, background_color="white"):
# Transforma token em uma string unica
self.text = ' '.join(text)
self.wordcloud = WordCloud(max_font_size=max_font_size, max_words=max_words, background_color=background_color).generate(self.text)
def generator(self):
# Gera e plota a wordcloud
plt.imshow(self.wordcloud, interpolation='bilinear')
plt.axis("off")
plt.show()
def save(self, file_name):
# Gera e salva no diretorio atual a wordcloud
self.wordcloud.to_file(file_name)
if __name__ == '__main__':
# Exemplo de uso da classe
import pdfReader as pdf
# classe pdfReader para gerar o conjunto de tokens do pdf e stopwords adicionais como parametro
reader = pdf.PDFReader('example.pdf', add_stopwords=['et', 'al'])
# classe wordCloud com os tokens gerados como parametros
wc = wordCloud(reader.getTokens())
# Plota e salva
wc.generator()
#wc.save('wc.png') | InfoEduc/Automatizando-Pesquisas-Bibliometricas | wordCloudGenerator.py | wordCloudGenerator.py | py | 1,515 | python | pt | code | 1 | github-code | 36 | [
{
"api_name": "wordcloud.WordCloud",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "matplo... |
18518883640 | import random
from multiprocessing import Pool
from ai_player import Ai_player
from deck import Deck
class Population:
"""
blackjack Ai player population
"""
POPULATION_SIZE = 400
BJ_ROUNDS = 50000
PARENT_SIZE = 5
MAX_THREADS = 40 # most efficient
def __init__(self):
def __init_players():
return [Ai_player() for _ in range(self.POPULATION_SIZE)]
self.generation = 0
self.best_player = None
self.players = __init_players()
self.__decks = []
def create_new_generation(self):
self.__create_new_gen_players()
self.generation += 1
def __create_new_gen_players(self):
parents = self.__get_best_players(self.PARENT_SIZE)
# an array of ai players that will make the next generation
players_parents = random.choices(parents,
weights=(55, 20, 15, 8, 2),
k=(self.POPULATION_SIZE -
self.PARENT_SIZE))
self.players = [Ai_player(player) for player in players_parents]
# for i in range(self.PARENT_SIZE):
for i in range(1):
self.players.append(parents[i])
def __get_best_players(self, num) -> list[Ai_player]:
"""
gets the top num highest ranked players
"""
return self.players[:num]
def play_generation(self):
"""
runs the current generation of players
"""
# generate the decks
self.__decks = [Deck(6) for _ in range(self.BJ_ROUNDS)]
# shuffle the decks
for deck in self.__decks:
deck.shuffle()
with Pool(self.MAX_THREADS) as pool:
players = pool.map(self.thread_worker, self.players)
self.players = players
# set the best player
self.players.sort(key=lambda x: x.get_fitness(), reverse=True)
self.best_player = self.players[0]
def thread_worker(self, player: Ai_player):
player.play_rounds(self.__decks)
return player
| BenPVandenberg/blackjack-ai | Dustin_Marks/population.py | population.py | py | 2,112 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "ai_player.Ai_player",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "random.choices",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "ai_player.Ai_player",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "ai_player.Ai_p... |
38608858024 | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 11 22:34:18 2018
@author: Roshan Zameer Syed
ID : 99999-2920
Description : Multivariate linear regression and backward elimination
"""
# Reading the dataset
import pandas as pd
data = pd.read_csv('Advertising.csv')
# Feature and response matrix
X = data.iloc[:,[1,2,3]].values
y = data.iloc[:,-1].values
# Splitting the dataset into Training and Test sets
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.2,random_state=0)
# Linear regresssion algorithm
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train,y_train)
ypred = regressor.predict(X_test)
import statsmodels.formula.api as sm
import numpy as np
# Adding new column of one's to X
X = np.append(arr = np.ones((200,1)), values = X, axis = 1)
# Running Backward elimination algorithm
X_opt = X[:,[0,1,2,3]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
X_opt = X[:,[0,1,2]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
import matplotlib.pyplot as plt
plt.scatter(X,y)
"""
• How many observations are in this data set ?
Ans : 200
• How many features are in this data set ?
Ans : 3 features
• What is the response for this data set ?
Ans : The last column sales is the response
• Which predictors are the most significant for this dataset ? Please explain Why ?
Ans : Column 1- TV and column 2- Radio are the most significant predictors because
their P-value is less than the threshold.
"""
| syedroshanzameer/Machine-Learning | Multi-variate Linear Regression/multiRegression.py | multiRegression.py | py | 1,600 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LinearRegression",
"line_number": 23,
"usage_type": "call"
... |
42245347977 | #!/usr/bin/python
# noinspection PyUnresolvedReferences
import json
import requests
from yoctopuce.yocto_api import *
from yoctopuce.yocto_display import *
from yoctopuce.yocto_anbutton import *
display_list = []
class SimpleXMBC(object):
    """Minimal XBMC/Kodi JSON-RPC client driven over HTTP.

    Navigation helpers (up/down/left/right/ok/back) each fire one
    ``Input.*`` JSON-RPC call against the configured host.
    """

    def __init__(self, host, port, user, password):
        self._password = password
        self._user = user
        self._port = port
        self._host = host
        # JSON-RPC request ids should be unique; incremented on every call.
        self._id = 1
        self._url = 'http://%s:%d/jsonrpc' % (self._host, self._port)

    def json_rpc_request(self, method, params):
        """POST one JSON-RPC 2.0 request and return the decoded response dict.

        Errors reported by the server are printed but the response is still
        returned so callers can inspect it.
        """
        headers = {'content-type': 'application/json'}
        payload = {
            "method": method,
            "params": params,
            "jsonrpc": "2.0",
            "id": self._id,
        }
        response = requests.post(
            self._url, data=json.dumps(payload), headers=headers).json()
        self._id += 1
        if 'error' in response:
            print(response['error'])
        return response

    def get_info_to_display(self):
        """Return ``(progress_percentage, title)`` for the active player.

        Falls back to ``(0, "not playing anything")`` when nothing is playing.
        """
        res = self.json_rpc_request('Player.GetActivePlayers', {})
        if 'result' in res and len(res['result']) > 0:
            player_id = res['result'][0]['playerid']
            player_type = res['result'][0]['type']
        else:
            return 0, "not playing anything"
        params = {"playerid": player_id, "properties": ["percentage"]}
        res = self.json_rpc_request('Player.GetProperties', params)
        if 'result' in res:
            percentage = res['result']['percentage']
        else:
            percentage = 0
        params = {"playerid": player_id,
                  "properties": ["title", "album", "artist", "season", "episode", "duration", "showtitle", "tvshowid",
                                 "thumbnail", "file", "fanart", "streamdetails"]}
        res = self.json_rpc_request('Player.GetItem', params)
        if 'result' in res:
            if player_type == "audio":
                # Audio: show "Title (Artist)"; video: show the item label.
                info = res['result']['item']['title'] + " (" + res['result']['item']['artist'][0] + ")"
            else:
                info = res['result']['item']['label']
        else:
            info = "not playing anything"
        return percentage, info

    def up(self):
        self.json_rpc_request('Input.Up', {})
        print("up")

    def down(self):
        # Fixed: method name was 'Input.down'; every other call uses the
        # capitalised Kodi method names.
        self.json_rpc_request('Input.Down', {})
        print('down')

    def left(self):
        self.json_rpc_request('Input.Left', {})
        print('left')

    def right(self):
        self.json_rpc_request('Input.Right', {})
        print('right')

    def ok(self):
        print('ok')
        self.json_rpc_request('Input.Select', {})

    def back(self):
        self.json_rpc_request('Input.Back', {})
        print('back')
xbmc_interface = SimpleXMBC('localhost', 8080, 'xbmc', '')
def init_screen(ydisplay):
    """
    Clear all layers of a newly-plugged display and draw a centred
    "detected!" banner on layer 1.

    :type ydisplay: YDisplay
    """
    ydisplay.resetAll()
    w = ydisplay.get_displayWidth()
    h = ydisplay.get_displayHeight()
    layer1 = ydisplay.get_displayLayer(1)
    # Black background bar, then white text on top.
    layer1.selectGrayPen(0)
    layer1.drawBar(0, 0, w - 1, h - 1)
    layer1.selectGrayPen(255)
    # NOTE(review): w / 2 and h / 2 are floats under Python 3 -- confirm the
    # Yoctopuce drawText API accepts non-integer coordinates.
    layer1.drawText(w / 2, h / 2, YDisplayLayer.ALIGN.CENTER, "detected!")
def an_button_callback(anbutton, value):
    """
    Dispatch an XBMC navigation command on the button's rising edge.

    The button's userData slot stores the previous pressed state, so a held
    button fires only once per press.

    :type value: str
    :type anbutton: YAnButton
    """
    if (anbutton.get_isPressed() == YAnButton.ISPRESSED_TRUE):
        last = anbutton.get_userData()
        if last == YAnButton.ISPRESSED_FALSE:
            print("send command for " + anbutton.get_friendlyName())
            funcid = anbutton.get_functionId()
            # Buttons 1-6 map to left/up/right/down/select/back.
            if funcid == 'anButton1':
                xbmc_interface.left()
            elif funcid == 'anButton2':
                xbmc_interface.up()
            elif funcid == 'anButton3':
                xbmc_interface.right()
            elif funcid == 'anButton4':
                xbmc_interface.down()
            elif funcid == 'anButton5':
                xbmc_interface.ok()
            elif funcid == 'anButton6':
                xbmc_interface.back()
    # Always record the current state so the next edge is detected correctly.
    anbutton.set_userData(anbutton.get_isPressed())
def device_arrival(module):
    """
    Hot-plug handler: initialise any Yoctopuce display module that appears
    and wire its six buttons to the XBMC navigation callback.

    :type module: YModule
    """
    serial_number = module.get_serialNumber()
    print("plug of " + serial_number)
    product = module.get_productName()
    if (product == "Yocto-MaxiDisplay") or product == "Yocto-Display":
        display = YDisplay.FindDisplay(serial_number + ".display")
        init_screen(display)
        display_list.append(display)
        for i in range(1, 7):
            button = YAnButton.FindAnButton("%s.anButton%d" % (serial_number, i))
            # Seed userData with the current state so the edge detector in
            # an_button_callback does not fire spuriously on plug-in.
            button.set_userData(button.get_isPressed())
            button.registerValueCallback(an_button_callback)
def device_removal(module):
    """Hot-unplug handler: log the serial number of the removed module."""
    serial = module.get_serialNumber()
    print("unplug of " + serial)
def main():
    """Event loop: mirror the active XBMC player's title/progress on every
    plugged-in Yoctopuce display, handling hot-plug as it happens."""
    errmsg = YRefParam()
    YAPI.InitAPI(0, errmsg)
    # Arrival/removal callbacks fire from YAPI.UpdateDeviceList below.
    YAPI.RegisterDeviceArrivalCallback(device_arrival)
    YAPI.RegisterDeviceRemovalCallback(device_removal)
    if YAPI.RegisterHub("usb", errmsg) < 0:
        print("Unable register usb :" + str(errmsg))
        return -1
    try:
        last_title = ''
        last_progress = 0
        plug_unplug_delay = 0
        while True:
            progress, title = xbmc_interface.get_info_to_display()
            # Redraw only when the title or progress actually changed.
            if (progress != last_progress) or (last_title != title):
                last_progress = progress
                last_title = title
                for display in display_list:
                    w = display.get_displayWidth()
                    h = display.get_displayHeight()
                    layer0 = display.get_displayLayer(0)
                    # Clear to black, then draw the centred title in white.
                    layer0.selectGrayPen(0)
                    layer0.drawBar(0, 0, w - 1, h - 1)
                    layer0.selectGrayPen(255)
                    layer0.drawText(w / 2, h / 2, YDisplayLayer.ALIGN.CENTER, title)
                    if progress > 0:
                        # Bottom-row progress bar scaled to display width.
                        layer0.drawBar(0, h - 1, int(progress * w / 100), h - 1)
                    # Double buffering: draw off-screen on layer 0, then swap.
                    display.swapLayerContent(0, 1)
            # Poll for plug/unplug events roughly every 5 seconds.
            plug_unplug_delay -= 1
            if plug_unplug_delay < 0:
                YAPI.UpdateDeviceList()
                plug_unplug_delay = 5
            YAPI.Sleep(1000)
    except KeyboardInterrupt:
        print("exit with Ctrl-C")
        return -1
if __name__ == '__main__':
main() | yoctopuce-examples/xbmc_remote | xbmc_remote.py | xbmc_remote.py | py | 6,386 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.post",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 8... |
10167749049 | from socket_webserver import Socketserver
import json
# Creating server instance
server = Socketserver()
# Configuring host and port
server.host = '127.0.0.1'
server.port = 8080
""" Two example functions to return response. Upper one returns simple json response and lower one returns html response
You could create something like views.py for handler functions and urls.py for routes
'request' contains dictionary of all the request arguments currently supported """
def home(request):
    """Render the demo landing page; returns (body, status, content-type)."""
    markup = '''
    <h1>This text is big</h1>
    <p>This is small</p>
    '''
    return markup, 200, "text/html"
def demo(request):
    """Serve a small JSON payload echoing the requested target path."""
    doubled = [2 * value for value in range(30)]
    payload = dict(
        name='Custom api endpoint made with pywebserver',
        target=request['target'],
        data=doubled,
    )
    return json.dumps(payload), 200, "application/json"
# Route table mapping URL paths to their handler functions.
routes = {
    '/': home,
    '/home': demo
}

server.routes = routes  # Apply routes for server
# Blocks here serving requests until the process is stopped.
server.run()
| miikalehtonen/pywebserver | main.py | main.py | py | 963 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "socket_webserver.Socketserver",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 31,
"usage_type": "call"
}
] |
10625869502 | from typing import List
from eth_vertigo.incremental.store import MutationRecord, IncrementalMutationStore
from eth_vertigo.core import Mutation
class IncrementalRecorder:
    """Builds an incremental-mutation store from completed mutation results."""

    def record(self, mutations: List[Mutation]) -> IncrementalMutationStore:
        """Persist every mutation that produced at least one crime scene."""
        store = IncrementalMutationStore()
        store.known_mutations = [
            self._mutation_to_record(mutation)
            for mutation in mutations
            if mutation.crime_scenes
        ]
        return store

    @staticmethod
    def _mutation_to_record(mutation: Mutation) -> MutationRecord:
        """Translate a Mutation into its serialisable MutationRecord form."""
        record = MutationRecord()
        record.new_text = mutation.value
        record.original_text = mutation.original_value
        record.source_file_name = mutation.source_file_name
        record.location = ":".join(str(part) for part in mutation.location)
        record.line_number = mutation.line_number
        record.crime_scenes = mutation.crime_scenes
        return record
| JoranHonig/vertigo | eth_vertigo/incremental/record.py | record.py | py | 958 | python | en | code | 180 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "eth_vertigo.core.Mutation",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "eth_vertigo.incremental.store.IncrementalMutationStore",
"line_number": 9,
"usage_type": "call"
},
... |
6301592120 | # !/user/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/5/12 21:11
# @Author : chineseluo
# @Email : 848257135@qq.com
# @File : run.py
# @Software: PyCharm
import os
from Common.publicMethod import PubMethod
import logging
from selenium.webdriver.common.by import By
from Base.baseBy import BaseBy
# Shared helper instance for reading YAML config files.
pub_api = PubMethod()
# Project root is two levels above this module; element-locator YAML files
# live under <root>/ActivityObject.
root_dir = os.path.dirname(os.path.dirname(__file__))
config_path = os.path.join(root_dir, 'ActivityObject')
config_path = os.path.abspath(config_path)
class ElemParams:
    """Loads element-locator definitions for one activity from a YAML file.

    The YAML file is expected to contain a top-level ``parameters`` list whose
    entries carry ``elem_name``, ``desc`` and ``data`` (with ``method`` and
    ``value`` keys describing the locator strategy).
    """

    def __init__(self, dir_name, file_name, root_dir_name=config_path):
        # Parallel lists populated from the YAML "parameters" entries.
        self.elem_name = []
        self.desc = []
        self.data = []
        self.info = []
        self.__run(dir_name, root_dir_name, file_name)

    def __run(self, dir_name, root_dir_name, file_name):
        """Parse the YAML file at <root_dir_name>/<dir_name>/<file_name> and
        populate ``info``/``elem_name``/``desc``/``data``."""
        config_dir_name = os.path.join(root_dir_name, dir_name)
        file_path = os.path.abspath(os.path.join(config_dir_name, file_name))
        try:
            self.info = PubMethod().read_yaml(file_path)['parameters']
            for entry in self.info:
                self.elem_name.append(entry['elem_name'])
                self.desc.append(entry['desc'])
                self.data.append(entry['data'])
        except Exception as e:
            # Best-effort: a broken YAML file leaves the lists empty.
            logging.error("文件解析失败!{},文件路径:{}".format(e, file_path))

    def get_locator(self, elem_name):
        """Return a ``(by, value)`` locator tuple for the named element.

        :param elem_name: logical element name as declared in the YAML file
        :return: ``(by, value)`` tuple usable by the driver, or ``None`` when
            the element is unknown or its definition is invalid.
        """
        # Lookup table replaces the previous 17-branch if/elif chain; built
        # at call time so By/BaseBy are resolved exactly as before.
        strategies = {
            "ID": By.ID,
            "XPATH": By.XPATH,
            "LINK_TEXT": By.LINK_TEXT,
            "PARTIAL_LINK_TEXT": By.PARTIAL_LINK_TEXT,
            "NAME": By.NAME,
            "TAG_NAME": By.TAG_NAME,
            "CLASS_NAME": By.CLASS_NAME,
            "CSS_SELECTOR": By.CSS_SELECTOR,
            "IOS_UIAUTOMATION": BaseBy.IOS_UIAUTOMATION,
            "IOS_PREDICATE": BaseBy.IOS_PREDICATE,
            "IOS_CLASS_CHAIN": BaseBy.IOS_CLASS_CHAIN,
            "ANDROID_UIAUTOMATOR": BaseBy.ANDROID_UIAUTOMATOR,
            "ANDROID_VIEWTAG": BaseBy.ANDROID_VIEWTAG,
            "WINDOWS_UI_AUTOMATION": BaseBy.WINDOWS_UI_AUTOMATION,
            "ACCESSIBILITY_ID": BaseBy.ACCESSIBILITY_ID,
            "IMAGE": BaseBy.IMAGE,
            "CUSTOM": BaseBy.CUSTOM,
        }
        for item in self.info:
            if item["elem_name"] != elem_name:
                continue
            method = item["data"]["method"]
            value = item["data"]["value"]
            logging.info("元素名称为:{},元素定位方式为:{},元素对象值为:{}".format(elem_name, method, value))
            locator_type = strategies.get(method)
            if locator_type is not None and value is not None:
                return (locator_type, value)
            # Unknown strategy or missing value: log and keep scanning,
            # matching the original behaviour.
            logging.error("元素名称:{},此元素定位方式异常,定位元素值异常,请检查!!!".format(elem_name))
# Page-object class bound to the login activity's YAML definition file.
class LoginActivityElem(ElemParams):
    def __init__(self):
        super(LoginActivityElem, self).__init__('Login_activity', 'Login_activity.yaml')


if __name__ == '__main__':
    # Manual smoke test: print the locator of the phone-number field.
    login_activity = LoginActivityElem()
    print(login_activity.get_locator("phone_number"))
| chineseluo/app_auto_frame_v1 | ActivityObject/elemParams.py | elemParams.py | py | 5,248 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "Common.publicMethod.PubMethod",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.path.jo... |
24300694321 | from pyspark import SparkContext, SparkConf
from pyspark.sql import SQLContext, DataFrame
from pyspark.sql.functions import lit
from pyspark.sql.functions import split, explode, monotonically_increasing_id
import numpy as np
from numpy import linalg as LA
from scipy.sparse import csr_matrix
import json
import datetime
from tqdm.notebook import trange, tqdm
## TODO:
# - Implementar es_recommendation:
# - Reemplazar variables locales de item_indexes y user_indexes por el indice en es_recommendation
# - Hacer update_item_viewers en elastic.py, que actualice el indice de items correspondiente en elastic_search
from sar.models.elastic import ElasticHelper
from sar.models.hilfe import Utils
class SAR():
    """Smart Adaptive Recommendations model backed by Elasticsearch indices.

    NOTE(review): this class appears to be work-in-progress; several concrete
    bugs are flagged inline below and should be resolved before use.
    """

    # Initialize the model
    def __init__(self, es_recommendation, es_metadata, TIME_SCORING=(17/3)*10**8, W_SCORING=1):
        # Elasticsearch helper wrapping both indices
        self.es_helper = ElasticHelper(es_recommendation, es_metadata)
        # NOTE(review): TIME_SCORING / W_SCORING are never stored, but
        # scoring_function reads self.T and self.W -- calling it raises
        # AttributeError. Likewise self.es_metadata, self.user_indexes and
        # self.item_indexes are read by other methods but never set here.
        # Model matrices
        self.A = None
        self.C = None
        self.S = None
        pass

    def load_new_users(self, dataset, verb=False):
        # Given a dataset, register its previously-unseen users in the
        # recommendation index; returns the number of already-known users.
        # NOTE(review): self.user_indexes is never initialised on this class.
        users_in_ds:set = set([u.user_id for u in dataset.select("user_id").collect()])
        users_in_ds:list = list(users_in_ds)
        # count of ignored (already known) user_ids
        ignored:int = 0
        for i in trange(len(users_in_ds)):
            userid = users_in_ds[i]
            if userid not in self.user_indexes.keys():
                index = len(self.user_indexes)
                # add the user to the es recommendation index
                self.es_helper.add_user(userid, index)
            else:
                ignored += 1
        if verb:
            print("* (pre) N° de usuarios únicos: ", len(self.user_indexes.keys()))
            print("* (post) N° de usuarios únicos: ", len(self.user_indexes.keys()))
            print("* Diferencia de usuarios: ", len(self.user_indexes) - ignored)
        return ignored

    def load_new_items(self, dataset, verb=False):
        # Given a dataset, register its previously-unseen items; items with
        # no metadata are skipped and reported in the returned info dict.
        # NOTE(review): self.item_indexes and self.es_metadata are never
        # initialised on this class (only self.es_helper is set in __init__).
        # item count before the loop starts
        n_items_pre = self.es_helper.n_items_in_recommendation()
        # info about skipped items
        info:dict = { "missing_metadata": [] }
        # unique items in the dataset
        items_in_ds = set([i.group_id for i in dataset.select("group_id").collect()])
        items_in_ds = list(items_in_ds)
        for j in trange(len(items_in_ds)):
            itemid = items_in_ds[j]
            if itemid not in self.item_indexes.keys():
                # index assigned by order of appearance
                index = len(self.item_indexes)
                # read this group_id's metadata
                metadata = self.es_metadata.get_item_metadata(itemid)
                # only add items that have metadata
                if metadata == dict():
                    info["missing_metadata"].append(itemid)
                else:
                    self.es_helper.add_item(itemid, index)
        if verb:
            print("* (pre) N° de items únicos: ", len(self.item_indexes.keys()))
            print("* (post) N° de items únicos: ", len(self.item_indexes.keys()))
            print("* Diferencia de items: ", len(self.item_indexes.keys()) - n_items_pre)
            print("* Items omitidos: ", len(info["missing_metadata"]))
        return info

    def build_coocurrence_matrix(self) -> csr_matrix:
        M:int = self.es_helper.n_items_in_recommendation() # n items in es_recommendation
        C:csr_matrix = csr_matrix((M,M)).tolil()
        # BUG(review): M is an int, so enumerate(M) raises TypeError; this
        # presumably should iterate over the item ids themselves.
        for i, item_i in enumerate(M):
            index_i:int = self.es_metadata.get_item_index(item_i) # index of item i
            item_i_viewers:set = self.es_metadata.get_item_viewers(item_i) # users who viewed item i
            for j, item_j in enumerate(M):
                index_j:int = self.es_metadata.get_item_index(item_j) # index of item j
                item_j_viewers:set = self.es_metadata.get_item_viewers(item_j) # users who viewed item j
                C[index_j, index_i] = len(item_j_viewers.intersection(item_i_viewers))
        return C

    def build_similarity_matrix(self) -> csr_matrix:
        # Placeholder: similarity is currently the raw co-occurrence counts.
        return self.C

    def scoring_function(self, event_time) -> float:
        # Exponential time-decay score: newer events score closer to W.
        # NOTE(review): self.T / self.W are never assigned (see __init__),
        # so this raises AttributeError as written.
        t_k = event_time.timestamp()
        t_0 = datetime.datetime.now().timestamp()
        exp = - (t_0 - t_k) / self.T
        return self.W * 2 ** exp

    def build_affinity_matrix(self, dataset) -> csr_matrix:
        # Given a dataset, rebuild the user-item affinity matrix A.
        # NOTE(review): annotated to return csr_matrix but actually returns
        # the ignored-interaction count (A is stored on self).
        M = self.es_helper.n_items_in_recommendation()
        N = self.es_helper.n_users_in_recommendation()
        self.A:csr_matrix = csr_matrix((N, M)).tolil()
        ignored:int = 0
        for interaction in dataset.collect():
            user_id, group_id, event_time = Utils.decode_interaction(interaction)
            if self.es_helper.is_valid_group_id(group_id) and self.es_helper.is_valid_user_id(user_id):
                index_item = self.es_helper.get_item_index(group_id)
                index_user = self.es_helper.get_user_index(user_id)
                self.A[index_user, index_item] += self.scoring_function(event_time)
            else:
                ignored += 1
        return ignored

    def fit(self, dataset):
        # Given a dataset, refresh the C, S and A matrices.
        # update the per-item viewers index
        self.es_metadata.update_items_viewers(dataset)
        # BUG(review): build_affinity_matrix requires a `dataset` argument
        # but is called without one below; self.es_metadata is also undefined
        # on this class.
        self.C = self.build_coocurrence_matrix()
        self.S = self.build_similarity_matrix()
        self.A = self.build_affinity_matrix()
        # prediction matrix
        self.Y = self.A @ self.S
        pass

    def recommend_similar_k_items(self,
                                  group_id,
                                  k:int,
                                  include=[],
                                  exclude=[],
                                  enhance=[]):
        # Given a group_id, recommend the k most similar items, optionally
        # filtered by `include` and boosted by `enhance` metadata tags.
        # NOTE(review): mutable default arguments ([]) are shared between
        # calls; prefer None sentinels.
        itemIndex = self.es_helper.get_item_index(group_id)
        item2item:list = [
            (similarity, i) for i, similarity in enumerate(list(self.S[itemIndex].toarray()[0]))
        ]
        # drop the element we are looking up similars for
        # NOTE(review): after this pop, list positions no longer match the
        # stored indices for items beyond itemIndex.
        item2item.pop(itemIndex)
        ## filtering and boosting
        BIAS = 10
        for _, index in item2item:
            item_metadata = self.es_metadata.get_item_metadata(index, index=True)
            # filter
            # BUG(review): item2item holds (score, index) tuples, so
            # item2item[index] = -1 clobbers a tuple with an int, and
            # item2item[index] += BIAS on a tuple raises TypeError.
            if item_metadata["filtros"] not in include:
                item2item[index] = -1
            # boost
            if item_metadata["filtros"] in enhance:
                item2item[index] += BIAS
            pass
        # sort items by similarity, best first
        ordered_items = sorted(item2item, key=lambda x: x[0], reverse=True)
        recommendations:list = ordered_items[:k]
        # are there items with a zero score?
        l = sum([1 for scoring, index in recommendations if scoring == 0])
        if l > 0:
            # drop the l zero-score items (popular-item backfill is still TODO)
            recommendations = recommendations[:k-l]
            # recommendations.extend(getTopLMovies(self.Y, l, exclude=exclude))
        # convert to the output format
        top_k:list = []
        for n_views, index in recommendations:
            # NOTE(review): get_group_id is subscripted here but looks like a
            # method on the helper -- confirm the intended API.
            rec = (self.es_helper.get_group_id[index], n_views)
            top_k.append(rec)
        return top_k

    def recommend_k_items_to_user(self, user_id):
        # Given a user_id, recommend the k items with the highest affinity.
        # TODO: not implemented yet.
        pass
| SebasAndres/Recomendadores | src/sar/models/sar.py | sar.py | py | 8,299 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sar.models.elastic.ElasticHelper",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "tqdm.notebook.trange",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "tqdm.notebook.trange",
"line_number": 78,
"usage_type": "call"
},
{
"api_na... |
22771811138 | # -*- coding: utf-8 -*-
# This file is part of CFVVDS.
#
# CFVVDS is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# CFVVDS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CFVVDS; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import wx
# Simple printing helper. Need more info about vanguard decks.
# TODO: extend layout once the CFV deck structure is finalised.
class DeckPrinter(wx.Printout):
    """wx.Printout that renders a deck list (main deck + triggers) on one page."""

    def __init__(self, deck):
        wx.Printout.__init__(self)
        self.Deck = deck
        # Page layout: top-left text origin and vertical line spacing,
        # in printer device units.
        self.StartX = 40
        self.StartY = 40
        self.VSpacer = 60
        self.CurrentX = self.StartX
        self.CurrentY = self.StartY
        # Section-header / body / title fonts.
        self.FirstFont = wx.Font(pointSize=48, family=wx.FONTFAMILY_DEFAULT, style=wx.FONTSTYLE_NORMAL, weight=wx.FONTWEIGHT_BOLD, faceName='Arial')
        self.SecondFont = wx.Font(pointSize=48, family=wx.FONTFAMILY_DEFAULT, style=wx.FONTSTYLE_NORMAL, weight=wx.FONTWEIGHT_NORMAL, faceName='Arial')
        self.ThirdFont = wx.Font(pointSize=54, family=wx.FONTFAMILY_DEFAULT, style=wx.FONTSTYLE_NORMAL, weight=wx.FONTWEIGHT_BOLD, faceName='Arial')

    # wx.Printout lifecycle hooks; all defer to the base implementation.
    def OnBeginDocument(self, start, end):
        return super(DeckPrinter, self).OnBeginDocument(start, end)

    def OnEndDocument(self):
        super(DeckPrinter, self).OnEndDocument()

    def OnBeginPrinting(self):
        super(DeckPrinter, self).OnBeginPrinting()

    def OnEndPrinting(self):
        super(DeckPrinter, self).OnEndPrinting()

    def OnPreparePrinting(self):
        super(DeckPrinter, self).OnPreparePrinting()

    def HasPage(self, page):
        # The whole deck list fits on a single page.
        if page <= 1:
            return True
        else:
            return False

    def GetPageInfo(self):
        # (minPage, maxPage, pageFrom, pageTo) -- single page only.
        return (1, 1, 1, 1)

    #TO DO: Change structure to fit CFV decks
    def OnPrintPage(self, page):
        """Draw the deck list: main-deck total, normal units, trigger units."""
        monsters = self.Deck.GetMonsters()
        triggers = self.Deck.GetTrigger()
        maindeckcount = len(monsters) + len(triggers)
        dc = self.GetDC()
        dc.SetFont(self.ThirdFont)
        dc.DrawText('Main Deck: ' + str(maindeckcount), self.CurrentX, self.CurrentY)
        self.NewLine()
        self.NewLine()
        dc.SetFont(self.FirstFont)
        dc.DrawText('Normal Units: ' + str(len(monsters)), self.CurrentX, self.CurrentY)
        self.NewLine()
        dc.SetFont(self.SecondFont)
        for c in monsters:
            dc.DrawText(c.Name, self.CurrentX, self.CurrentY)
            self.NewLine()
        self.NewLine()
        dc.SetFont(self.FirstFont)
        dc.DrawText('Trigger Units: ' + str(len(triggers)), self.CurrentX, self.CurrentY)
        self.NewLine()
        dc.SetFont(self.SecondFont)
        for c in triggers:
            dc.DrawText(c.Name, self.CurrentX, self.CurrentY)
            self.NewLine()
        return True

    def NewLine(self):
        # Advance the text cursor one line down.
        self.CurrentY += self.VSpacer

    def AddVSpace(self, n):
        self.CurrentY += n

    def AddHSpace(self, n):
        self.CurrentX += n
{
"api_name": "wx.Printout",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "wx.Printout.__init__",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "wx.Printout",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "wx.Font",
... |
31141721992 | '''
Analyse observation basket
'''
import argparse
import joblib
import pandas as pd
import apriori
import helpers
from rules import RuleGenerator
parser = argparse.ArgumentParser(description='Convert Halias RDF dataset for data mining')
parser.add_argument('minsup', help='Minimum support', nargs='?', type=float, default=0.8)
args = parser.parse_args()

# Run Apriori single-threaded.
apriori.NUM_CORES = 1

MINSUP = args.minsup

# NOTE(review): each line of observation.basket is one itemset; items are
# whatever tokens the basket file carries -- see helpers.read_observation_basket.
itemsets = helpers.read_observation_basket(helpers.DATA_DIR + 'observation.basket')
all_items = list(set([item for itemset in itemsets for item in itemset]))
print(len(itemsets))
print(len(all_items))
#print(itemsets[:1])

print('\nSupport {:.3f} frequent itemsets:\n'.format(MINSUP))

# Mine frequent itemsets at the requested minimum support.
freq_items = apriori.apriori(itemsets, all_items, MINSUP, verbose=True)

print(freq_items[-1])
print(len(freq_items))

# Cache frequent itemsets so rule generation can be re-run without re-mining.
joblib.dump(freq_items, helpers.DATA_DIR + 'freq_items_{:.3f}.pkl'.format(MINSUP))

# Generate association rules at 0.5 minimum confidence.
ruler = RuleGenerator(itemsets, freq_items)
rules = ruler.rule_generation(0.5)  #, fixed_consequents=[('varis',)])
print(len(rules))

joblib.dump(rules, helpers.DATA_DIR + 'freq_rules_{:.3f}.pkl'.format(MINSUP))

#for (rule, conf) in rules:
#    print(' -> %s \t conf: {:.2f} \t supp: {:.3f}'.format(conf, ruler.support(*rule)))
{
"api_name": "argparse.ArgumentParser",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "apriori.NUM_CORES",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "helpers.read_observation_basket",
"line_number": 22,
"usage_type": "call"
},
{
"ap... |
36808206999 | import os
import copy
from typing import Dict, Union
from torch.utils.data import Dataset
import torchio as tio
from .subject_loaders import SubjectLoader
from .subject_filters import SubjectFilter, ComposeFilters
class SubjectFolder(Dataset):
    """ A PyTorch Dataset for 3D medical data.
    Args:
        root: Path to the root of the subject folder dataset.
        subject_path: Path to folder containing subjects, relative to the root.
            Each subject must have their own folder within the subject_path.
        subject_loader: A SubjectLoader pipeline that loads subject data from the subject folders.
        cohorts: An optional dictionary that defines different subject cohorts in this dataset.
            The dictionary keys are cohort names, and the values are ``SubjectFilter``s.
            The active cohort can be set with the ``set_cohort(cohort_name)`` method.
            A special cohort name ``'all'`` may be provided to define a filter that is applied
            to all subjects.
        transforms: Optional ``tio.Transform``s that are applied to each subject.
            This can be a single transformation pipeline, or a dictionary that defines
            a number of pipelines.
            The key ``"default"`` can be used to set a default transformation pipeline
            when no cohort is active.
            If a matching key is in `cohorts`, then that transformation will become
            active when ``set_cohort(cohort_name)`` is called.
            A transformation can also be explicitly set with ``set_transform(transform_name)``.
    """
    def __init__(
            self,
            root: str,
            subject_path: str,
            subject_loader: SubjectLoader,
            cohorts: Dict[str, SubjectFilter] = None,
            transforms: Union[tio.Transform, Dict[str, tio.Transform]] = None,
            ref_img = None
    ):
        self.root = root
        self.subject_path = os.path.join(self.root, subject_path)
        self.subject_loader = subject_loader
        self.cohorts = {} if cohorts is None else cohorts
        self.transforms = transforms
        self.ref_img = ref_img
        # Images are lazily loaded by default; the preload_* methods flip these.
        self._preloaded = False
        self._pretransformed = False
        # Loops through all subjects in the directory
        subjects = []
        subject_names = os.listdir(self.subject_path)
        for subject_name in subject_names:
            # The subject_data dictionary will be used to initialize the tio.Subject
            subject_folder = os.path.join(self.subject_path, subject_name)
            subject_data = dict(name=subject_name, folder=subject_folder)
            # Apply subject loaders
            self.subject_loader(subject_data)
            # torchio doesn't like to load a subject with no images
            if not any(isinstance(v, tio.Image) for v in subject_data.values()):
                continue
            subject = tio.Subject(**subject_data)
            if self.ref_img:
                # Align every subject's affine to the reference image.
                transform = tio.CopyAffine(self.ref_img)
                subject = transform(subject)
            subjects.append(subject)
        if "all" in self.cohorts:
            all_filter = self.cohorts['all']
            subjects = all_filter(subjects)
        self.active_cohort = 'all'
        self.all_subjects = None
        self.all_subjects_map = None
        self.subjects = None
        self.subjects_map = None
        self.excluded_subjects = None
        self.transform = None
        self.set_all_subjects(subjects)

    def set_all_subjects(self, subjects):
        """Replace the full subject list (sorted by name) and re-apply the active cohort."""
        subjects.sort(key=lambda subject: subject['name'])
        self.all_subjects = subjects
        self.all_subjects_map = {subject['name']: subject for subject in subjects}
        self.set_cohort(self.active_cohort)

    def set_subjects(self, subjects):
        """Set the active (filtered) subject list and track the excluded remainder."""
        self.subjects = subjects
        self.subjects_map = {subject['name']: subject for subject in subjects}
        self.excluded_subjects = [subject for subject in self.all_subjects
                                  if subject not in self.subjects]

    def set_cohort(self, cohort: Union[str, SubjectFilter]):
        """Activate a cohort by name (also selecting its transform) or by an explicit filter."""
        self.active_cohort = cohort
        if isinstance(cohort, str):
            self.set_transform(cohort)
            if cohort == "all" or cohort is None:
                self.set_subjects(self.all_subjects)
            elif cohort in self.cohorts:
                subject_filter = self.cohorts[cohort]
                self.set_subjects(subject_filter(self.all_subjects))
            else:
                raise ValueError(f"Cohort name {cohort} is not defined in dataset cohorts: {self.cohorts}.")
        if isinstance(cohort, SubjectFilter):
            # Ad-hoc filters fall back to the default transform.
            self.set_transform('default')
            subject_filter = cohort
            self.set_subjects(subject_filter(self.all_subjects))

    def set_transform(self, transform: Union[str, tio.Transform]):
        """Select the active transform, by pipeline name or as an explicit transform."""
        if isinstance(transform, str):
            transform_name = transform
            if self.transforms is None:
                self.transform = None
            elif isinstance(self.transforms, tio.Transform):
                # A single pipeline applies regardless of the requested name.
                self.transform = self.transforms
            elif isinstance(self.transforms, dict):
                if transform_name in self.transforms:
                    self.transform = self.transforms[transform_name]
                elif 'default' in self.transforms:
                    self.transform = self.transforms['default']
                else:
                    self.transform = None
        elif isinstance(transform, tio.Transform):
            self.transform = transform
        else:
            raise ValueError()

    def get_cohort_dataset(self, cohort: Union[str, SubjectFilter]) -> 'SubjectFolder':
        """Build a new SubjectFolder restricted to the given cohort.

        NOTE(review): the new instance is constructed with self.subject_path,
        which __init__ joins onto root again; os.path.join only ignores root
        when the path is absolute -- confirm behaviour for relative roots.
        """
        transforms = self.transforms
        if isinstance(cohort, str):
            subject_filter = self.cohorts[cohort]
            if isinstance(transforms, dict):
                # Promote the cohort's transform to the new dataset's default.
                transforms = transforms.copy()
                if cohort in transforms:
                    transforms['default'] = transforms[cohort]
                    del transforms[cohort]
        elif isinstance(cohort, SubjectFilter):
            subject_filter = cohort
        else:
            raise ValueError()
        cohorts = self.cohorts.copy()
        if 'all' in cohorts:
            cohorts['all'] = ComposeFilters(cohorts['all'], subject_filter)
        else:
            cohorts['all'] = subject_filter
        return SubjectFolder(self.root, self.subject_path, self.subject_loader, cohorts, transforms, ref_img=self.ref_img)

    def __len__(self):
        return len(self.subjects)

    def __getitem__(self, idx):
        # Get subjects by an integer ID in 0..N, or by the subject's folder name
        if isinstance(idx, int):
            subject = self.subjects[idx]
        elif isinstance(idx, str):
            subject = self.subjects_map[idx]
        else:
            raise ValueError(f"Subject index must be an int or a string, not {idx} of type {type(idx)}")
        # Load subject and apply transform
        # Deep copy so transforms never mutate the cached subject.
        subject = copy.deepcopy(subject)
        if not self._preloaded:
            subject.load()
        if not self._pretransformed and self.transform is not None:
            subject = self.transform(subject)
        return subject

    def __contains__(self, item):
        if isinstance(item, int):
            return item < len(self)
        if isinstance(item, str):
            return item in self.subjects_map
        if isinstance(item, tio.Subject):
            return item in self.subjects
        return False

    # Preloads the images for all subjects. Typically they are lazy-loaded in __getitem__.
    def preload_subjects(self):
        if self._preloaded:
            return
        self._preloaded = True
        loaded_subjects = []
        for subject in self.all_subjects:
            subject = copy.deepcopy(subject)
            subject.load()
            loaded_subjects.append(subject)
        self.set_all_subjects(loaded_subjects)
        self.set_cohort(self.active_cohort)

    def preload_and_transform_subjects(self):
        if self._pretransformed:
            return
        self.preload_subjects()
        if self.transform is not None:
            self._pretransformed = True
            # NOTE(review): only the active cohort's subjects are transformed
            # here but stored via set_all_subjects -- confirm this is intended
            # when a cohort other than 'all' is active.
            self.set_all_subjects([self.transform(subject) for subject in self.subjects])

    # TODO: Do this better.
    def load_additional_data(self, path: str, subject_loader: SubjectLoader):
        """Merge extra per-subject data from another folder into existing subjects."""
        subject_names = os.listdir(path)
        for subject_name in subject_names:
            subject_folder = os.path.join(path, subject_name)
            subject_data = dict(name=subject_name, folder=subject_folder)
            subject_loader(subject_data)
            del subject_data['name']
            del subject_data['folder']
            # find the first subject with matching name, else return None
            matched_subject = next((subject for subject in self.subjects if subject['name'] == subject_name), None)
            if matched_subject is not None:
                # update the primary object so other references such as subject_map are updated
                matched_subject.update(subject_data)
| efirdc/Segmentation-Pipeline | segmentation_pipeline/data_processing/subject_folder.py | subject_folder.py | py | 9,208 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "torch.utils.data.Dataset",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "subject_loaders.SubjectLoader",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "su... |
import pandas as pd
import matplotlib.pyplot as plt
import re  # regular expression

df = pd.read_csv('./csv/Travel details dataset.csv')

# Drop every row that has at least one missing value.
df = df.dropna()

# [OPTIONAL] keep only the country name after the comma, when one is present
df['Destination'] = df['Destination'].apply(lambda place: place.split(', ')[1] if ',' in place else place)

# Parse the trip date columns into datetime objects.
for _date_column in ('Start date', 'End date'):
    df[_date_column] = pd.to_datetime(df[_date_column])

# Regular expression matching a (possibly comma-grouped) numeric amount.
pattern = re.compile(r'\d+(,\d+)*\.?\d*')


def _parse_cost(raw):
    """Extract the numeric amount from a formatted cost string, or None if absent."""
    match = pattern.search(raw)
    return float(match.group().replace(',', '')) if match else None


# Convert the formatted cost columns to plain floats.
for _cost_column in ('Accommodation cost', 'Transportation cost'):
    df[_cost_column] = df[_cost_column].apply(_parse_cost)
def show_chart_1():
    """Bar chart: number of trips per destination."""
    destination_counts = df['Destination'].value_counts()
    # Smaller figure with extra bottom margin for the rotated labels.
    figure, axis = plt.subplots(figsize=(6, 3))
    axis.bar(x=destination_counts.index, height=destination_counts.values)
    figure.subplots_adjust(bottom=0.3)
    plt.xlabel('Tujuan Wisata')
    plt.ylabel('Jumlah')
    plt.xticks(rotation=270)
    plt.show()
def show_chart_2():
    """Pie chart: distribution of accommodation types."""
    type_counts = df['Accommodation type'].value_counts()
    plt.pie(x=type_counts.values, labels=type_counts.index, autopct='%1.1f%%')
    plt.title('Tipe Akomodasi')
    plt.show()
def show_chart_3():
    """Number of trips per period, chosen interactively.

    Asks the user for a period ('M' = per month, 'Y' = per year), groups the
    trips by that period on 'Start date', and shows a line chart (monthly)
    or a bar chart (yearly).
    """
    print('Pilih periode')
    print('1. Bulan (M)')
    print('2. Tahun (Y)')
    inp = ''
    # Keep prompting until the user enters M or Y (case-insensitive).
    # The original loop re-tested the same condition three times and used a
    # redundant break; a single membership test is equivalent.
    while inp.upper() not in ('M', 'Y'):
        inp = input('Periode (M/Y) : ')
        if inp.upper() not in ('M', 'Y'):
            print('Tolong masukkan huruf M atau Y')
    monthly = inp.upper() == 'M'

    # Ensure the column is datetime (already done at module load; harmless here).
    df['Start date'] = pd.to_datetime(df['Start date'])

    # Same grouping logic for both periods; only the strftime format differs.
    period_format = '%Y-%m' if monthly else '%Y'
    trips_per_period = df.groupby(df['Start date'].dt.strftime(period_format))['Trip ID'].count()

    fig, ax = plt.subplots(figsize=(10, 5))
    if monthly:
        # Month labels are longer than year labels, so leave extra bottom room.
        fig.subplots_adjust(bottom=.15)
        plt.plot(trips_per_period.index, trips_per_period.values)
        plt.xticks(rotation=270)
        plt.xlabel('Bulan')
    else:
        plt.bar(x=trips_per_period.index, height=trips_per_period.values)
        plt.xlabel('Tahun')
    plt.ylabel('Jumlah')
    plt.show()
def show_chart_4():
    """Bar chart: traveler counts by nationality."""
    nationality_counts = df['Traveler nationality'].value_counts()
    figure, _axis = plt.subplots(figsize=(10, 5))
    # Extra bottom margin for the rotated country labels.
    figure.subplots_adjust(bottom=.25)
    plt.bar(x=nationality_counts.index, height=nationality_counts.values)
    plt.xlabel("Negara asal wisatawan")
    plt.ylabel("Jumlah")
    plt.xticks(rotation=270)
    plt.show()
def show_chart_5():
    """Box plot: transportation cost grouped by accommodation type."""
    accommodation_types = df['Accommodation type'].unique()
    cost_groups = []
    for accommodation_type in accommodation_types:
        selection = df['Accommodation type'] == accommodation_type
        cost_groups.append(df[selection]['Transportation cost'])
    plt.boxplot(cost_groups, labels=accommodation_types)
    # Format the y axis as whole-dollar amounts with thousands separators.
    plt.gca().yaxis.set_major_formatter(plt.FuncFormatter(lambda value, loc: "${:,}".format(int(value))))
    plt.xlabel('Tipe Akomodasi')
    plt.ylabel('Biaya Transportasi')
    plt.title('Biaya Transportasi Berdasarkan Tipe Akomodasi')
    plt.show()
def show_chart_6():
    """Pie chart: traveler counts by gender."""
    gender_counts = df['Traveler gender'].value_counts()
    _figure, _axis = plt.subplots(figsize=(6, 3))
    name_by_gender = {'Male': 'Laki Laki', 'Female': 'Perempuan'}  # translation purpose
    color_by_gender = {'Male': '#3258a8', 'Female': '#f59dd0'}  # blue and pink
    pie_labels = gender_counts.index.map(lambda gender: name_by_gender[gender])
    pie_colors = gender_counts.index.map(lambda gender: color_by_gender[gender])
    plt.pie(x=gender_counts.values, labels=pie_labels, autopct='%1.1f%%', colors=pie_colors)
    plt.title('Data Wisatawan Berdasarkan Jenis Kelamin')
    plt.show()
| mbenkzz/pyt11kelompok13 | functions.py | functions.py | py | 4,955 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "re.compile",
... |
19150508999 | import tensorflow as tf
from .util.datasetUtil import dataset , filelength
from tensorflow.keras.applications import VGG16,VGG19 ,InceptionV3
from .util.Callbacks import CustomCallback
import datetime
class inference_load():
    """Builds and fine-tunes an ImageNet-pretrained CNN from UI-supplied parameters."""

    def __init__(self, params, csvPath):
        """Parse request parameters into training configuration.

        Args:
            params: mapping with keys 'model', 'inputShape', 'loss',
                'optimizer', 'batch_size', 'n_classes', 'learning_rate',
                'epochs'.
            csvPath: NOTE(review): currently ignored — dataset paths are
                hard-coded below; confirm whether it should override them.
        """
        print(params)
        self.csvPath = './dataset/dataset.csv'
        self.evalPath = './dataset/Evaldataset.csv'
        self.model = params['model']
        # Square RGB input.
        self.inputShape = (int(params['inputShape']), int(params['inputShape']), 3)
        self.include_top = False
        self.loss = params['loss']
        self.optimizer = params['optimizer']
        self.batch_size = int(params['batch_size'])
        self.metrics = ['acc']
        self.n_classes = params['n_classes']
        self.learning_rate = float(params['learning_rate'])
        self.epochs = int(params['epochs'])
        # e.g. server/results/2024_01_01_10_saved_model.h5
        self.modelOutputPath = 'server/results/{datetime}_{epochs}_saved_model.h5'.format(
            datetime=str(datetime.datetime.now())[:10].replace('-', '_'),
            epochs=self.epochs,
        )

    def load(self, x):
        """Return the requested pretrained backbone built on input tensor x.

        Returns None for an unrecognized model name (matches original behavior).
        """
        name = self.model.lower()
        if name == 'vgg16':
            return VGG16(include_top=self.include_top, input_shape=self.inputShape,
                         weights='imagenet', input_tensor=x)
        if name == 'vgg19':
            return VGG19(include_top=self.include_top, input_shape=self.inputShape,
                         weights='imagenet', input_tensor=x)
        if name == 'inception':
            return InceptionV3(include_top=self.include_top, input_shape=self.inputShape,
                               weights='imagenet', input_tensor=x)

    def lossParser(self):
        """Translate the UI loss name into the Keras loss identifier (or None)."""
        losses = {
            'Categorical Cross Entropy': 'categorical_crossentropy',
            'Binary Cross Entropy': 'binary_crossentropy',
            'Hinge': 'categorical_hinge',
            'Mean Square Error': 'mean_squared_error',
        }
        return losses.get(self.loss)

    def OptimizerSelector(self):
        """Instantiate the chosen Keras optimizer with the configured learning rate."""
        optimizers = {
            'Adam': tf.keras.optimizers.Adam,
            'SGD': tf.keras.optimizers.SGD,
            'RMSProp': tf.keras.optimizers.RMSprop,
            'Adadelta': tf.keras.optimizers.Adadelta,
            'Adagrad': tf.keras.optimizers.Adagrad,
            'Nadam': tf.keras.optimizers.Nadam,
            # Fixed: the Keras class is Adamax, not AdaMax (AttributeError before).
            'AdaMax': tf.keras.optimizers.Adamax,
        }
        optimizer_cls = optimizers.get(self.optimizer)
        return optimizer_cls(learning_rate=self.learning_rate) if optimizer_cls is not None else None

    # For Vanish Gradient
    def add_regularization(self, model, regularizer=tf.keras.regularizers.l2(0.0001)):
        """Attach a kernel regularizer to every layer that supports one.

        Bug fix: 'self' was missing from the signature, so
        self.add_regularization(model) bound the instance to 'model' and the
        Keras model to 'regularizer', returning the instance and crashing at
        model.layers in run().
        """
        if not isinstance(regularizer, tf.keras.regularizers.Regularizer):
            return model
        for layer in model.layers:
            if hasattr(layer, 'kernel_regularizer'):
                setattr(layer, 'kernel_regularizer', regularizer)
        return model

    def run(self):
        """Build, compile, train and save the classifier.

        Returns -1 when the training dataset is empty; otherwise saves the
        trained model to self.modelOutputPath.
        """
        trainDataset, testDataset = dataset(self.csvPath, self.evalPath,
                                            self.batch_size, self.inputShape[:2])
        if len(trainDataset) == 0:
            return -1
        file_length = filelength(self.csvPath)
        x = tf.keras.Input(shape=self.inputShape)
        model = self.load(x)
        model = self.add_regularization(model)
        # Replace the (excluded) top with a flatten + softmax classification head.
        flat_layer = tf.keras.layers.Flatten()(model.layers[-1].output)
        classification = tf.keras.layers.Dense(int(self.n_classes), activation='softmax')(flat_layer)
        steps_per_epoch = int(file_length / self.batch_size)
        model = tf.keras.Model(inputs=x, outputs=classification)
        model.compile(loss=self.lossParser(), metrics=self.metrics, optimizer=self.OptimizerSelector())
        model.summary()
        # fit_generator is deprecated/removed in TF2; Model.fit accepts generators.
        model.fit(
            trainDataset,
            validation_data=testDataset,
            validation_steps=5,
            epochs=self.epochs,
            steps_per_epoch=steps_per_epoch,
            callbacks=[CustomCallback()],
        )
        model.save(self.modelOutputPath)
{
"api_name": "datetime.datetime.now",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.applications.VGG16",
"line_number": 30,
"usage_type": "call"
},
{
"... |
import base64
import sys

from github import Github


def _decoded_text(contents):
    """Decode a GitHub content object's base64 payload to a UTF-8 string."""
    return base64.b64decode(contents.content.replace('\n', '')).decode('utf-8')


# Credentials and target repository come from the command line:
#   python sync.py <access_token> <owner/repo>
# (The original dead ACCESS_TOKEN = '' / REPO_NAME = '' placeholders are removed.)
ACCESS_TOKEN = sys.argv[1]
REPO_NAME = sys.argv[2]

g = Github(ACCESS_TOKEN)
repo = g.get_repo(REPO_NAME)

contents = repo.get_contents("/README.md")
contents_bkp = repo.get_contents("/docs/README.md")

text = _decoded_text(contents)
text_bkp = _decoded_text(contents_bkp)

# Push the top-level README into docs/ only when the two copies differ.
if text != text_bkp:
    repo.update_file(contents_bkp.path,
                     "docs(README): synchronize README files", text,
                     contents_bkp.sha)
| BobAnkh/LinuxBeginner | scripts/sync.py | sync.py | py | 697 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "github.Github",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "base64.b64decode",
"line_... |
16921327452 | #!/usr/bin/env python3
"""
Advent of Code 2022 - Elf Rucksack Reorganization
A given rucksack always has the same number of items in each of its two compartments
Lowercase item types a through z have priorities 1 through 26.
Uppercase item types A through Z have priorities 27 through 52.
Find the item type that appears in both compartments of each rucksack.
What is the sum of the priorities of those item types?
"""
import argparse
from typing import List
def get_priority(letter: str) -> int:
    """Map a rucksack item letter to its priority.

    Lowercase a-z map to 1-26, uppercase A-Z map to 27-52.

    Args:
        letter: single alphabetical letter in upper or lower case

    Returns: conversion of character to expected int
    """
    # ord('a') == 97 -> subtract 96; ord('A') == 65 -> subtract 38
    offset = 38 if letter.isupper() else 96
    return ord(letter) - offset
def compare_comparts(compartment1: str, compartment2: str) -> str:
    """Return the priority of the item in common between compartments.

    NOTE(review): annotated as returning str but actually returns the int
    priority from get_priority — confirm the intended annotation.

    Args:
        compartment1: string of letters representing items
        compartment2: string of letters representing items

    Returns: priority of the single item shared between the compartments
    """
    shared_items = set(compartment1) & set(compartment2)
    return get_priority(list(shared_items)[0])
def elf_grouping(rucksacks: list, group_size: int) -> list:
    """Partition the rucksack list into consecutive groups of group_size.

    Args:
        rucksacks: list of all the rucksacks in camp
        group_size: number of elves in a rucksack priority group

    Returns: list of groups, each a slice of up to group_size rucksacks
    """
    groups = []
    for start in range(0, len(rucksacks), group_size):
        groups.append(rucksacks[start:start + group_size])
    return groups
def fetch_badge(group_of_bags: List[list]) -> int:
    """Return the priority of the badge item shared by every bag in the group.

    Args:
        group_of_bags: list containing the rucksack item lists of one elf group

    Returns: the shared badge priority for the group of elves
    """
    bag_sets = [set(bag) for bag in group_of_bags]
    shared = set.intersection(*bag_sets)
    badge = list(shared)[0]
    return get_priority(badge)
# pylint: disable=R0914
def main() -> None:
    """Read the rucksack list and print duplicate-item and badge priority sums."""
    parser = argparse.ArgumentParser("Input list to work out the elf game outcomes")
    parser.add_argument("--input_list", help="Input rucksack list as txt file", type=str)
    # Fixed: parse_args() was called twice; once is enough.
    args = parser.parse_args()

    with open(args.input_list, encoding="utf-8") as file:
        rucksacks = file.read().splitlines()

    # Part 1: each rucksack splits into two equal compartments; sum the
    # priority of the item duplicated between them.
    item_dups = []
    for rucksack in rucksacks:
        items = list(rucksack)
        half = len(items) // 2
        item_dups.append(compare_comparts(items[:half], items[half:]))
    sum_of_priorities = sum(item_dups)
    print(f"Sum of priorities in duplicates: {sum_of_priorities}\n")

    # Part 2: elves travel in groups of three; sum each group's badge priority.
    rucksack_groups = elf_grouping([list(sack) for sack in rucksacks], 3)
    badge_priorities = [fetch_badge(group) for group in rucksack_groups]
    sum_badge_priors = sum(badge_priorities)
    print(f"Sum of badge priorities: {sum_badge_priors}\n")
# Fixed: the guard compared __name__ to main()'s return value, which invoked
# main() even on import and never executed the guarded body.
if __name__ == "__main__":
    main()
| CristiGuijarro/NN-aoc-2022 | scripts/elf_rucksack_priorities.py | elf_rucksack_priorities.py | py | 3,390 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 70,
"usage_type": "call"
}
] |
26409439119 |
class Solution:
    def topKFrequent(self, words: List[str], k: int) -> List[str]:
        """Return the k most frequent words, most frequent first.

        Ties are broken alphabetically: the heap stores (-count, word), so a
        more frequent word — or, among equals, the lexicographically smaller
        one — pops first.

        Fixes over the original: removed the shadowed builtin name 'dict',
        dead variables and commented-out code, translated comments to
        English, and imported heapq explicitly instead of relying on the
        judge environment.
        """
        import heapq
        from collections import Counter

        counts = Counter(words)
        heap = [(-frequency, word) for word, frequency in counts.items()]
        heapq.heapify(heap)
        return [heapq.heappop(heap)[1] for _ in range(k)]
| lpjjj1222/leetcode-notebook | 692. Top K Frequent Words.py | 692. Top K Frequent Words.py | py | 1,460 | python | zh | code | 0 | github-code | 36 | [
{
"api_name": "collections.Counter",
"line_number": 19,
"usage_type": "call"
}
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.