index
int64
0
1,000k
blob_id
stringlengths
40
40
code
stringlengths
7
10.4M
997,600
425dc6f120334a6a92afc53278b331db1d6d5d21
_offset_main_0: BeginFunc 4 ; _tmp0 = 0 ; Return _tmp0 ; EndFunc ; f_main: main: BeginFunc 24 ; _tmp1 = 1 ; IfZ _tmp1 Goto _L1 ; _tmp2 = 1 ; PushParam _tmp2 ; LCall _PrintInt ; PopParams 4 ; Goto _L0 ; _L1: _tmp3 = 2 ; PushParam _tmp3 ; LCall _PrintInt ; PopParams 4 ; _L0: _tmp4 = 3 ; PushParam _tmp4 ; LCall _PrintInt ; PopParams 4 ; _tmp5 = 1 ; IfZ _tmp5 Goto _L2 ; _tmp6 = 4 ; PushParam _tmp6 ; LCall _PrintInt ; PopParams 4 ; _L2: EndFunc ;
997,601
eb31053918e28ee0a4c0c74aa2e4080851844b2c
import os import json import numpy as np import tqdm from PIL import Image import matplotlib.pyplot as plt import tensorflow as tf from tensorflow.keras.utils import Sequence from utils import transform_targets, preprocess_image , yolo_anchors, yolo_anchor_masks, load_fake_dataset class COCODataset(Sequence): IMAGE_WIDTH = 416 IMAGE_HEIGHT = 416 IMAGE_CHANNEL = 3 MAX_BBOXES = 100 def __init__(self,dataroot, imagedir,jsonfile, batch_size,shuffle=False, *args, **kwargs): super(COCODataset, self).__init__(*args, **kwargs) self.dataroot = dataroot self.imagedir = imagedir self.jsonfile = jsonfile self.batch_size = batch_size self.shuffle=shuffle self.imgs = dict() self.imgs_info = dict() self.annos = dict() self.categories = dict() with open(os.path.join(dataroot,self.jsonfile), "r") as json_file: self.json_info = json.load(json_file) for category in tqdm.tqdm(self.json_info["categories"]): self.categories[category["name"]] = category["id"] for img in tqdm.tqdm(self.json_info["images"]): self.imgs[img["id"]] = os.path.join(imagedir,img["file_name"]) self.imgs_info[img["id"]] = {"width":img["width"],"height":img["height"]} self.annos[img["id"]] = [] self.instances_max = 0 #for annotation in tqdm.tqdm(json_info["annotations"]): for annotation in self.json_info["annotations"]: img_width = self.imgs_info[annotation["image_id"]]["width"] img_height = self.imgs_info[annotation["image_id"]]["height"] stx = annotation["bbox"][0] sty = annotation["bbox"][1] endx = annotation["bbox"][2] + stx endy = annotation["bbox"][3] + sty # stx = stx/float(img_width) # sty = sty/float(img_height) # endx = endx/float(img_width) # endy = endy/float(img_height) bbox = np.asarray([stx,sty,endx,endy]+[int(annotation["category_id"]) - 1],dtype=np.float32) self.annos[annotation["image_id"]].append(bbox) if self.instances_max < len(self.annos[annotation["image_id"]]): self.instances_max = len(self.annos[annotation["image_id"]]) def __getitem__(self, index): image_ids = 
list(self.annos.keys())[index*self.batch_size:(index+1)*self.batch_size] batch_images = np.zeros((self.batch_size,self.IMAGE_HEIGHT,self.IMAGE_WIDTH,self.IMAGE_CHANNEL)) batch_annotations = np.zeros((self.batch_size,self.MAX_BBOXES,5)) for n, image_id in enumerate(image_ids): image = tf.image.decode_jpeg(open(self.imgs[image_id], 'rb').read(), channels=3) image = tf.cast(image,dtype=tf.float32) image = preprocess_image(image,self.IMAGE_WIDTH) batch_images[n, :, :, :] = image for n ,image_id in enumerate(image_ids): for m, anno in enumerate(self.annos[image_id]): batch_annotations[n,m,:] = anno batch_images = tf.convert_to_tensor(batch_images, tf.float32) batch_annotations = tf.convert_to_tensor(batch_annotations, tf.float32) #colors = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0]]) #print(batch_annotations[0,:,0:4]) # images_with_boxes = tf.image.draw_bounding_boxes(batch_images[:,:,:,:], batch_annotations[:,:,0:4],colors) # for i in range(self.batch_size): # plt.imshow(images_with_boxes[i,:,:,:]) # plt.show() batch_annotations = transform_targets(batch_annotations, yolo_anchors, yolo_anchor_masks,self.IMAGE_WIDTH) return batch_images, batch_annotations, image_ids def __len__(self): return int(len(self.imgs)/self.batch_size) def getCOCOJson(self): return self.json_info def getCOCOImageInfo(self): return self.imgs_info def getCOCOAnnotationPath(self): return os.path.join(self.dataroot,self.jsonfile)
997,602
f15082af78abff1e4f20d96c48408258238db24d
import h5py import math import random import matplotlib.cm as cm import matplotlib.pyplot as plt import numpy as np import pandas as pd """ Created by Mohsen Naghipourfar on 9/2/18. Email : mn7697np@gmail.com or naghipourfar@ce.sharif.edu Website: http://ce.sharif.edu/~naghipourfar Github: https://github.com/naghipourfar Skype: mn7697np """ MY_DPI = 192 RADIUS = 10 WIDTH = 10 def draw_ring(n_points=250): xs, ys = [], [] r = random.random() * RADIUS for i in range(n_points): p = 2 * math.pi * random.random() x = r * math.cos(p) y = r * math.sin(p) x += 1 y += 1 xs.append(x) ys.append(y) return xs, ys def draw_circle(n_points=250): xs, ys = [], [] for i in range(n_points): p = random.random() * 2 * math.pi r = 1 * math.sqrt(random.random()) x = math.cos(p) * r y = math.sin(p) * r xs.append(x) ys.append(y) return xs, ys def draw_square_ring(n_points=250): width = random.random() * WIDTH std = 0.025 np.random.seed(int(n_points * width)) x = list(np.random.normal(loc=0.0, scale=std, size=[n_points // 4])) x += list(np.random.uniform(0.0, width, size=[n_points // 4])) x += list(np.random.normal(loc=width, scale=std, size=[n_points // 4])) x += list(np.random.uniform(0.0, width, size=[n_points // 4])) np.random.seed(int(n_points * width + 1)) y = list(np.random.uniform(0.0, width, size=[n_points // 4])) y += list(np.random.normal(loc=width, scale=std, size=[n_points // 4])) y += list(np.random.uniform(0.0, width, size=[n_points // 4])) y += list(np.random.normal(loc=0.0, scale=std, size=[n_points // 4])) return x, y def rotate_point(xs, ys, degree, origin=(0, 0)): degree = math.radians(degree) bias = random.random() * 5 for idx, x in enumerate(xs): y = ys[idx] ox, oy = origin x -= ox y -= oy cos = math.cos(degree) sin = math.sin(degree) rotated_x = cos * x - sin * y rotated_y = sin * x + cos * y xs[idx], ys[idx] = rotated_x + ox + bias, rotated_y + oy + bias return xs, ys def plot_shapes(xs, ys, idx=0): result_path = "../Results/train/" + "image-" + str(idx) + ".png" 
plt.axis('off') plt.savefig(result_path, dpi=MY_DPI) data = plt.imread(result_path)[:, :, 0] plt.imsave(result_path, data, cmap=cm.gray) def main(): for idx in range(100): plt.close("all") plt.figure(figsize=(128 / MY_DPI, 128 / MY_DPI), dpi=MY_DPI) number_of_objects = 5 shapes = ['square ring', 'ring'] xs, ys = [], [] for shape in shapes: for i in range(random.randint(1, number_of_objects)): x, y = [], [] if shape == "circle": x, y = draw_circle(n_points=300) elif shape == "square ring": x, y = draw_square_ring(n_points=300) elif shape == "ring": x, y = draw_ring(n_points=300) degree = random.random() * 180 x, y = rotate_point(x, y, degree, origin=(0, 0)) xs = xs + x ys = ys + y plt.plot(xs, ys, 'o') plot_shapes(xs, ys, idx) def create_hdf5(): data = pd.read_csv("../Data/3mermotif_na.csv", index_col="icgc_sample_id") with h5py.File("../Data/3mermotif.h5", "w ") as f: f.create_dataset("") if __name__ == '__main__': # x, y = [1], [1] # degree = 45 # print(rotate_point(x, y, degree)) main()
997,603
600be341e7dfed4f58ae3867f3a265bc13b41229
import sys #TODO: auto convert component __init__ into initialize_variables() def convert(): """A crude converter for OpenMDAO v1 files to OpenMDAO v2""" cvt_map = { '.add(' : '.add_subsystem(', '.add_param(' : '.add_input(', '.params': '._inputs', '.unknowns': '._outputs', '.resids': '._residuals', 'openmdao.test.util': 'openmdao.devtools.testutil', 'def solve_nonlinear(self, params, unknowns, resids)': 'def compute(params, unknowns)', } with open(sys.argv[1], 'r') as f: contents = f.read() for old, new in cvt_map.items(): contents = contents.replace(old, new) sys.stdout.write(contents) if __name__ == '__main__': convert()
997,604
e042b7821ceb2fbee5305b0a5fe86ab8284b8d5b
from sqlalchemy import ForeignKeyConstraint from sqlalchemy.orm import relationship, backref from sqlalchemy.ext.associationproxy import association_proxy from application import db class StationBase(object): def refuel(self, liters, kilometers): self.liters += liters self.kilometers += kilometers self.average_consumption = (self.liters / self.kilometers) * 100 class User(db.Model): __tablename__ = 'users' id = db.Column(db.String(255), primary_key=True) # gas_stations = association_proxy("user_gas_stations", 'gas_stations') def __init__(self, id): self.id = id def __repr__(self): return '<Id {}>'.format(self.id) class UserGasStation(db.Model, StationBase): id = db.Column(db.String(255), primary_key=True) __tablename__ = 'user_gas_station' user_id = db.Column(db.String(255), db.ForeignKey("users.id"), primary_key=True) gas_station_city = db.Column(db.String(80), primary_key=True) gas_station_address = db.Column(db.String(80), primary_key=True) kilometers = db.Column(db.Float) liters = db.Column(db.Float) gas_station = db.relationship("GasStation") user = db.relationship("User") __table_args__ = (ForeignKeyConstraint([gas_station_city, gas_station_address], ["gas_stations.city", "gas_stations.address"]), {}) def __init__(self, user_id, gas_station_city, gas_station_address): self.user_id = user_id self.gas_station_city = gas_station_city self.gas_station_address = gas_station_address def __repr__(self): return '<Id {}>'.format(self.id) class GasStation(db.Model, StationBase): __tablename__ = "gas_stations" city = db.Column(db.String(80), primary_key=True) address = db.Column(db.String(80), primary_key=True) name = db.Column(db.String(80)) kilometers = db.Column(db.Float) liters = db.Column(db.Float) average_consumption = db.Column(db.Float) def __init__(self, city, address, name): self.city = city self.address = address self.name = name self.kilometers = 0 self.liters = 0 self.average_consumption = 100 def __repr__(self): return '<Name: {0}; City: {1}; Address: 
{2}>'.format(self.name, self.city, self.address)
997,605
80d322a5d3d0385ffce474aeefa1928c11d713c8
TC = int(input()) for tc in range(1, TC+1): N, M = map(int, input().split()) v = list(map(int, input().split())) sum = 0 for i in range (M): sum += v[i] minv = maxv = sum for i in range(1, N - M + 1): sum = 0 for j in range(i, i + M): sum += v[j] if maxv < sum : maxv = sum if minv > sum : minv = sum print("#%d %d" % (tc, maxv - minv))
997,606
abf96d9b253e00b3150b44e9bac4500e7a2daff3
from rest_framework.views import APIView from django.views.decorators.csrf import csrf_exempt from rest_framework.response import Response from rest_framework import status import json from django.core import serializers from .models import * from .serializers import * from rest_framework import parsers from collections import namedtuple from django.contrib import admin from django.db.models import Count from django.db.models import F from .automaticmail import SendEmail from .OTPGenerator import GenerateOTP from .Autherizer import AuthRequired, LinkAutherizer from .monetize import MonetizeNotes from datetime import date import random import string class InitialLoad(APIView): def get(self, request, device_auth,format=None): if len(device_auth) != 16: return Response({"ERROR":"Access Denied"}, status=status.HTTP_404_NOT_FOUND) if DeviceAuth.objects.filter(device_key=device_auth).exists(): app_version = AppVersion.objects.filter(id=1).first() app_force_update = AppForceUpdateRequired.objects.filter(id=1).first() mapped_key = DeviceAuth.objects.filter(device_key = device_auth).first() print(mapped_key.updated_on) return Response({"Auth_key":mapped_key.mapped_key, "app_version":app_version.version, "app_force_update":app_force_update.force_update_required, "user_type_old":"OLD_USER"}, status=status.HTTP_200_OK) else: mapped_id = ''.join(random.choices(string.ascii_lowercase + string.digits, k=16)) new_data = DeviceAuth(device_key=device_auth, mapped_key=mapped_id) new_data.save() app_version = AppVersion.objects.filter(id=1).first() app_force_update = AppForceUpdateRequired.objects.filter(id=1).first() mapped_key = DeviceAuth.objects.filter(device_key = device_auth).first() #print(mapped_key.updated_on) return Response({"Auth_key":mapped_key.mapped_key, "app_version":app_version.version, "app_force_update":app_force_update.force_update_required, "user_type_new":"NEW_USER"}, status=status.HTTP_200_OK) class SnippetList(APIView): parser_classes = 
(parsers.MultiPartParser, parsers.FormParser,) serializer_class = NotesSerializer def get(self, request, sem, branch, subject, device_auth,format=None): if AuthRequired(device_auth) == True: MasterServiceHits.objects.filter(id=1).update(notes_hit=F('notes_hit') + 1) ws_semester = MasterSemesters.objects.filter(sem_name = sem).first() ws_branch = MasterBranches.objects.filter(branch_name = branch).first() ws_subject = MasterSubjects.objects.filter(subject_name = subject).first() Notes_raw = MasterNotes.objects.filter(semester = ws_semester, branch = ws_branch, subject = ws_subject) if not Notes_raw: return Response({ "ERROR":"404 NO DATA FOUND :("}, status=status.HTTP_404_NOT_FOUND) notes_serializer = NotesSerializer(Notes_raw, many=True,context={'Device_key': device_auth} ).data return Response(notes_serializer, status=status.HTTP_200_OK) else: return Response({"ERROR":"Access Denied"}, status=status.HTTP_404_NOT_FOUND) class QuestionPaperList(APIView): parser_classes = (parsers.MultiPartParser, parsers.FormParser,) serializer_class = QuestionPaperSerializer def get(self, request, sem, branch, subject, device_auth, format=None): if AuthRequired(device_auth) == True: MasterServiceHits.objects.filter(id=1).update(question_paper_hit=F('question_paper_hit') + 1) ws_semester = MasterSemesters.objects.filter(sem_name = sem).first() ws_branch = MasterBranches.objects.filter(branch_name = branch).first() ws_subject = MasterSubjects.objects.filter(subject_name = subject).first() Question_paper_raw = MasterQuestionPapers.objects.filter(semester = ws_semester, branch = ws_branch, subject = ws_subject) if not Question_paper_raw: return Response({ "ERROR":"404 NO DATA FOUND :("}, status=status.HTTP_404_NOT_FOUND) question_paper_serializer = QuestionPaperSerializer(Question_paper_raw, many=True,context={'Device_key': device_auth} ).data return Response(question_paper_serializer, status=status.HTTP_200_OK) else: return Response({"ERROR":"Access Denied"}, 
status=status.HTTP_404_NOT_FOUND) class FetchMasterList(APIView): def get(self, request,device_auth, format=None): if AuthRequired(device_auth) == True: MasterRecords = namedtuple('MasterRecords', ('branches', 'semester')) master = MasterRecords( branches=MasterBranches.objects.all(), semester=MasterSemesters.objects.all(), ) serializer = NotesMasterSerializer(master,context={'Device_key': device_auth} ).data return Response(serializer) else: return Response({"ERROR":"Access Denied"}, status=status.HTTP_404_NOT_FOUND) class FetchSubject(APIView): def get(self, request, sem, branch, device_auth, format=None): if AuthRequired(device_auth) == True: ws_semester = MasterSemesters.objects.filter(sem_name = sem).first() ws_branch = MasterBranches.objects.filter(branch_name = branch).first() Sub_raw = MasterSubjects.objects.filter(subject_semester = ws_semester, subject_branch = ws_branch) if not Sub_raw: return Response({ "ERROR":"404 NO DATA FOUND :("}, status=status.HTTP_404_NOT_FOUND) sub_serializer = SubjectSerializer(Sub_raw, many=True,context={'Device_key': device_auth} ).data return Response(sub_serializer, status=status.HTTP_200_OK) else: return Response({"ERROR":"Access Denied"}, status=status.HTTP_404_NOT_FOUND) class LabManualVid(APIView): def get(self, request, sem, branch, subject, program_id, device_auth, format=None): if AuthRequired(device_auth) == True: MasterServiceHits.objects.filter(id=1).update(lab_manual_video_hit=F('lab_manual_video_hit') + 1) ws_semester = MasterSemesters.objects.filter(sem_name = sem).first() ws_branch = MasterBranches.objects.filter(branch_name = branch).first() ws_subject = MasterSubjects.objects.filter(subject_name = subject).first() video_master = MasterVideoLab.objects.filter(semester=ws_semester,subject=ws_subject,branch=ws_branch,programid=program_id) if not video_master: return Response({ "ERROR":"404 NO DATA FOUND :("}, status=status.HTTP_404_NOT_FOUND) video_serializer = MasterVideoLabSerializer(video_master, 
many=True,context={'Device_key': device_auth} ).data return Response(video_serializer, status=status.HTTP_200_OK) else: return Response({"ERROR":"Access Denied"}, status=status.HTTP_404_NOT_FOUND) class LoadSyllabusCopy(APIView): def get(self, request, branch, device_auth, format=None): if AuthRequired(device_auth) == True: MasterServiceHits.objects.filter(id=1).update(syllabus_copy_hit=F('syllabus_copy_hit') + 1) ws_branch = MasterBranches.objects.filter(branch_name = branch).first() syllabus_master = MasterSyllabusCopy.objects.filter(branch=ws_branch) if not syllabus_master: return Response({ "ERROR":"404 NO DATA FOUND :("}, status=status.HTTP_404_NOT_FOUND) syllabus_serilizer = LoadSyllabusCopySerializer(syllabus_master, many=True,context={'Device_key': device_auth} ).data return Response(syllabus_serilizer, status=status.HTTP_200_OK) else: return Response({"ERROR":"Access Denied"}, status=status.HTTP_404_NOT_FOUND) class LoadAbout(APIView): def get(self, request, device_auth, format=None): if AuthRequired(device_auth) == True: about_master=MasterAbout.objects.all() if not about_master: return Response({ "ERROR":"404 NO DATA FOUND :("}, status=status.HTTP_404_NOT_FOUND) about_serializer = MasterAboutSerializer(about_master, many=True, context={'Device_key': device_auth} ).data return Response(about_serializer, status=status.HTTP_200_OK) else: return Response({"ERROR":"Access Denied"}, status=status.HTTP_404_NOT_FOUND) class TrackDownloads(APIView): def get(self, request, type, id, device_auth, format=None): if AuthRequired(device_auth) == True: mapped_key = DeviceAuth.objects.filter(device_key=device_auth) if type == 'Notes': if TrackNotesDownlods.objects.filter(device_id=device_auth, notes_id=id).exists(): data = TrackNotesDownlods.objects.filter(device_id=device_auth, notes_id=id).first() if data.date == date.today() and data.download_count < 10: MasterNotes.objects.filter(id=id).update(downloads=F('downloads') + 1) 
TrackNotesDownlods.objects.filter(notes_id=id,device_id=device_auth ).update(download_count=F('download_count') + 1) MonetizeNotes(id) elif data.date != date.today(): TrackNotesDownlods.objects.filter(notes_id=id, device_id=device_auth ).update(date=date.today()) TrackNotesDownlods.objects.filter(notes_id=id,device_id=device_auth ).update(download_count=1) MasterNotes.objects.filter(id=id).update(downloads=F('downloads') + 1) MonetizeNotes(id) else: tracker = TrackNotesDownlods(device_id=device_auth, notes_id=id,download_count = 1,date = date.today() ) tracker.save() MasterNotes.objects.filter(id=id).update(downloads=F('downloads') + 1) MonetizeNotes(id) return Response({"status":"O.K"}, status=status.HTTP_200_OK) elif type == 'Qpaper': MasterQuestionPapers.objects.filter(id=id).update(downloads=F('downloads') + 1) return Response({"status": "O.K"}, status=status.HTTP_200_OK) elif type == 'SBcopy': MasterSyllabusCopy.objects.filter(id=id).update(downloads=F('downloads') + 1) return Response({"status": "O.K"}, status=status.HTTP_200_OK) elif type == 'LabVid': MasterVideoLab.objects.filter(id=id).update(views=F('views') + 1) return Response({"status": "O.K"}, status=status.HTTP_200_OK) else: return Response({"ERROR":"OOPS! 
an internal error occured :("}, status=status.HTTP_404_NOT_FOUND) else: return Response({"ERROR": "Access Denied"}, status=status.HTTP_404_NOT_FOUND) class FeedBack(APIView): def post(self, request, format=None): if AuthRequired(request.data['device_id']) == True: serializer = FeedBackSerializer(data=request.data) if serializer.is_valid(): serializer.save() admin_emails = AdminEmailId.objects.all() context = { 'name': request.data['name'], 'feedback': request.data['feed_back'], 'device_id': request.data['device_id'], } subject = 'FeedBack has been given by ' + ' ' + request.data['name'] for reciever_mail in admin_emails: mail_value = SendEmail(reciever_mail.mail_reciever_email , context,subject, 'FeedBackMail.html') if mail_value == True: return Response({"status": "O.K"}, status=status.HTTP_200_OK) else: return Response({"ERROR": "OOPS! an internal error occured :("}, status=status.HTTP_404_NOT_FOUND) else: return Response({"ERROR": "Form is not valid :("}, status=status.HTTP_404_NOT_FOUND) else: return Response({"ERROR": "Access Denied"}, status=status.HTTP_404_NOT_FOUND) class LoadFeedBack(APIView): def get(self, request, device_auth, format=None): if AuthRequired(device_auth) == True: ws_terms = TermsAndConditions.objects.filter(id=1) if not ws_terms: return Response({ "ERROR": "404 NO DATA FOUND :("}, status=status.HTTP_404_NOT_FOUND) terms_serializer = TermsAndConditionsSerialier(ws_terms, many=True, context={'Device_key': device_auth} ).data return Response(terms_serializer, status=status.HTTP_200_OK) else: return Response({"ERROR": "Access Denied"}, status=status.HTTP_404_NOT_FOUND) class ContactUS(APIView): def post(self, request, format=None): if AuthRequired(request.data['device_id']) == True: if ContactUs.objects.filter(device_id = request.data['device_id']).exists(): old_contact_data = ContactUs.objects.filter(device_id = request.data['device_id']).first() if old_contact_data.email != request.data['email']: 
ContactUs.objects.filter(device_id=request.data['device_id']).update(user_verified = False) ContactUs.objects.filter(device_id=request.data['device_id']).update(name=request.data['name'], email=request.data['email'], contact=request.data['contact'], user_message=request.data['user_message']) if ContactUs.objects.filter(device_id = request.data['device_id'], user_verified = False): response = GenerateOTP(request.data['device_id'],request.data['email'], request.data['name'], 'C') if response == True: return Response({"status": "OTP has been shared"}, status=status.HTTP_200_OK) else: return Response({"ERROR": "OOPS! an internal error occured :("}, status=status.HTTP_404_NOT_FOUND) else: contact_details = ContactUs.objects.filter(device_id=request.data['device_id'], email=request.data['email'],user_verified = True).first() link_mapper = LinkAutherizer(request.data['email']) link = 'http://34.219.72.32/UserNotesUpload/' + str( contact_details.id) + '/' + contact_details.device_id + '/' + link_mapper context = { 'name': contact_details.name, 'contact': contact_details.contact, 'email': request.data['email'], 'message': contact_details.user_message, 'link': link, } print(link) subject = 'Thanks for contacting us!' mail_status = SendEmail(request.data['email'], context, subject, 'ThanksForContactingUs.html') if mail_status == False: return Response({"ERROR": "OOPS! an internal error occured :("}, status=status.HTTP_404_NOT_FOUND) return Response({"status": "User has been verified, no need of otp validation"}, status=status.HTTP_200_OK) else: serializer = ContactUsSerializer(data=request.data) if serializer.is_valid(): serializer.save() response = GenerateOTP(request.data['device_id'], request.data['email'], request.data['name'], 'C') if response == True: return Response({"status": "OTP has been shared"}, status=status.HTTP_200_OK) else: return Response({"ERROR": "OOPS! 
an internal error occured :("}, status=status.HTTP_404_NOT_FOUND) else: return Response({"ERROR": "Access Denied"}, status=status.HTTP_404_NOT_FOUND) class ValidateOTP(APIView): def get(self, request, otp, device_auth, format=None): if AuthRequired(device_auth) == True: otp_inside = OTPValidate.objects.filter(device_id=device_auth).first() if otp == str(otp_inside.otp): ContactUs.objects.filter(device_id = device_auth, email = otp_inside.email).update(user_verified = True) contact_details = ContactUs.objects.filter(device_id = device_auth, email = otp_inside.email,user_verified = True).first() link_mapper=LinkAutherizer(otp_inside.email) link = 'http://34.219.72.32/UserNotesUpload/'+str(contact_details.id)+'/'+contact_details.device_id+'/'+link_mapper context = { 'name':contact_details.name, 'contact': contact_details.contact, 'email':otp_inside.email, 'message':contact_details.user_message, 'link':link, } subject = 'Thanks for contacting us!' mail_status = SendEmail(otp_inside.email, context, subject, 'ThanksForContactingUs.html') if mail_status == False: return Response({"ERROR": "OOPS! an internal error occured :("}, status=status.HTTP_404_NOT_FOUND) subject = "Some user has contacted us!!!" admin_emails = AdminEmailId.objects.all() for reciever_mail in admin_emails: mail_status = SendEmail(reciever_mail.mail_reciever_email, context, subject, 'ThanksForContactingUs.html') if mail_status == False: return Response({"ERROR": "OOPS! 
an internal error occured :("}, status=status.HTTP_404_NOT_FOUND) else: return Response({"status": "O.K"}, status=status.HTTP_200_OK) else: #ContactUs.objects.filter(device_id=device_auth, email=otp_inside.email).delete() return Response({"status":"OTP not matching"}, status=status.HTTP_403_FORBIDDEN) else: return Response({"ERROR": "Access Denied"}, status=status.HTTP_404_NOT_FOUND) class NotesTracker(APIView): def get(self, request, type, email_uniqueid, device_auth, format=None): if AuthRequired(device_auth) == True: if type == 'OLD': unique_id = email_uniqueid if EmailUniqueidMapper.objects.filter(mapped_id = unique_id).exists(): mappedData = EmailUniqueidMapper.objects.filter(mapped_id = unique_id).first() else: return Response({"ERROR": "REAuth required :("}, status=status.HTTP_404_NOT_FOUND) email = mappedData.email TrackerRecords = namedtuple('TrackerRecords', ('NotesTrack', 'Earnings')) Tracker = TrackerRecords( NotesTrack=MasterNotes.objects.filter(owner_email = email), Earnings=UserMoneyBucket.objects.filter(email = email), ) if not Tracker: return Response({"ERROR":"404 NO DATA FOUND :(", "Mapped_key": unique_id}, status=status.HTTP_404_NOT_FOUND) #serializer = TrackMasterSerializer(Tracker, context={'Device_key': device_auth} #).data notesTrackerSerializer = TrackMasterSerializer(Tracker, context={'Device_key': device_auth, 'Mapped_Key': unique_id, 'Email': email}).data return Response(notesTrackerSerializer, status=status.HTTP_200_OK) elif type == "NEW": email = email_uniqueid #if ContactUs.object.filter(email = email,user_verified=True).exists(): # pass #else: # return Response({"ERROR": "OOPS! kindly upload notes first to enable this feature"}, # status=status.HTTP_404_NOT_FOUND) response = GenerateOTP(device_auth, email, "user", "N") if response == True: return Response({"status": "OTP has been shared"}, status=status.HTTP_200_OK) else: return Response({"ERROR": "OOPS! 
an internal error occured :("}, status=status.HTTP_404_NOT_FOUND) else: return Response({"ERROR": "OOPS! an internal error occured :("}, status=status.HTTP_404_NOT_FOUND) else: return Response({"ERROR": "Access Denied"}, status=status.HTTP_404_NOT_FOUND) class TrackerOTPValidater(APIView): def get(self, request, otp, email, device_auth, format=None): if AuthRequired(device_auth) == True: otp_inside = TrackerOTPValidate.objects.filter(email=email).first() if otp == str(otp_inside.otp): mapped_id = ''.join(random.choices(string.ascii_lowercase + string.digits, k=16)) if EmailUniqueidMapper.objects.filter(email = email).exists(): EmailUniqueidMapper.objects.filter(email=email).update(mapped_id=mapped_id) else: mappedData = EmailUniqueidMapper(mapped_id=mapped_id, email=email, link_mapper=mapped_id, link_expiry=date.today()) mappedData.save() TrackerRecords = namedtuple('TrackerRecords', ('NotesTrack', 'Earnings')) Tracker = TrackerRecords( NotesTrack=MasterNotes.objects.filter(owner_email=email), Earnings=UserMoneyBucket.objects.filter(email=email), ) if not Tracker: return Response({"ERROR": "404 NO DATA FOUND :(", "Mapped_key": mapped_id}, status=status.HTTP_404_NOT_FOUND) # serializer = TrackMasterSerializer(Tracker, context={'Device_key': device_auth} # ).data notesTrackerSerializer = TrackMasterSerializer(Tracker, context={'Device_key': device_auth, 'Mapped_Key': mapped_id, 'Email': email}).data return Response(notesTrackerSerializer, status=status.HTTP_200_OK) else: return Response({"status": "OTP not matching"}, status=status.HTTP_403_FORBIDDEN) else: return Response({"ERROR": "Access Denied"}, status=status.HTTP_404_NOT_FOUND)
997,607
1bcdd37ce6ef6398c074a67550bbd9be6abeafe6
from . import account_move from . import sale_order from . import sale_order_line
997,608
0d8a7510a7699c667a21cd94ce8b1059e0ae3768
# 使用python代码解决生产中排产的可能组合的问题。该问题先以没有人员约束为起点,为任意数量 # 的釜来排产,举例来说排两个釜,可选择不同产品种类组合,每个釜只可以选择一个种类。起始版 # 本为列表版。 '''此版本不需要输入开多少条生产线,会根据生产线条数自动算所有情况。''' import itertools def sumoftuple(tuple): sum = 0 for i in tuple: sum = sum + i return sum #各条生产线生产不同种类产品时的不同CRF,来源:capacity review表 crfR1 = [4,5.7,3.8] crfR2 = [2] crfR3 = [1.1,1.6] crfR4 = [1.56] crfR5 = [0.9] crfR6 = [1.9] crfR9 = [0.55] #将CRF转化为单生产线单日产能 def crftocap(a): return 24/a #将每个釜的单生产线单日产能存为列表 capR1 = list(map(crftocap,crfR1)) capR2 = list(map(crftocap,crfR2)) capR3 = list(map(crftocap,crfR3)) capR4 = list(map(crftocap,crfR4)) capR5 = list(map(crftocap,crfR5)) capR6 = list(map(crftocap,crfR6)) capR9 = list(map(crftocap,crfR9)) #total为全列表,用来供combinations做不同的组合,reactor为想开几个釜,zuhe为该情况下的开釜组合情况 total = [capR1,capR2,capR3,capR4,capR5,capR6,capR9] reactor = len(total) zuhecount = 0 for r in range(1,reactor+1): zuhe = itertools.combinations(total,r) #根据开釜的组合,再从每个釜可以选择的产品种类来排列,算出每种组合的总产能 # zuhecount = 0 capacity = [] for c in zuhe: d = itertools.product(*c) # zuhecount+=1 for i in d: sum = sumoftuple(i) zuhecount+=1 print('以下为第',zuhecount,'种组合,开',r,'个釜的单日产能:','%.2f'%sum) capacity.append(sum) print('单日产能介于%.2f和%.2f之间。'%(min(capacity),max(capacity)))
997,609
3a23dd3fbfb57779754d7780b650e856b53936f4
class CustomList(object): def __init__(self): self.custom_list = [] def add(self, node): index = self.get_index(self.custom_list, node) # hitta den plats där den ska sättas in self.custom_list.insert(index, node) def delete(self, node): index = self.bin_search(self.custom_list, node) self.custom_list.pop(index) def get_first(self): return self.custom_list[0] def __str__(self): string = '' for i in range(len(self.custom_list)): string = string + str(self.custom_list[i]) + ', ' return string def cell_search(self, cell): index = -1 for i in range(len(self.custom_list)): if self.custom_list[i].cell == cell: index = i break return index def get_index(self, custom_list, node): # sök på f if len(custom_list) == 0: return 0 index_f = self.bin_search(custom_list, node, 0) index = index_f if index != len(custom_list): if custom_list[index].fake_f == node.fake_f: # sortera på h sublist_f, list_index = self.get_sublist_f(custom_list, index, node) index_h = self.bin_search(sublist_f, node, 1) index = list_index + index_h if index != len(custom_list): if custom_list[index].fake_h == node.fake_h: # sortera på direction sublist_h, list_index = self.get_sublist_h(custom_list, index, node) index_dir = self.bin_search(sublist_h, node, 2) index = list_index + index_dir return index def get_sublist_f(self, custom_list, index, node): lo = index hi = index sublist = [] value = node.fake_f while hi < len(custom_list) - 1: if custom_list[hi + 1].fake_f == value: hi = hi + 1 else: break while lo > 0: if custom_list[lo - 1].fake_f == value: lo = lo - 1 else: break list_index = lo while lo <= hi: sublist.append(custom_list[lo]) lo = lo + 1 return sublist, list_index def get_sublist_h(self, custom_list, index, node): lo = index hi = index sublist = [] value = node.fake_h while hi < len(custom_list) - 1: if custom_list[hi + 1].fake_h == value: hi = hi + 1 else: break while lo > 0: if custom_list[lo - 1].fake_h == value: lo = lo - 1 else: break list_index = lo while lo <= hi: 
sublist.append(custom_list[lo]) lo = lo + 1 return sublist, list_index def bin_search(self, custom_list, node, value_comp): lo = 0 hi = len(custom_list) mid = (lo + hi)//2 # // är för floored integer division if value_comp == 0: value = node.fake_f # börja med att jämföra f elif value_comp == 1: value = node.fake_h elif value_comp == 2: value = node.direction_value else: value = 0 other_value = 0 # måste ha ett initialt värde, ändras i while-loopen sen. Kan inte sätta # custom_list[mid].fake_f pga kaos om listan är tom while hi != lo: if value_comp == 0: other_value = custom_list[mid].fake_f elif value_comp == 1: other_value = custom_list[mid].fake_h elif value_comp == 2: other_value = custom_list[mid].direction_value if value < other_value: hi = mid mid = (hi + lo)//2 elif value > other_value: lo = mid + 1 mid = (hi + lo)//2 elif value == other_value: return int(mid) # elif value == other_value: # jämför på nästa sak # if value_comp == 0: # value = node.fake_h # other_value = custom_list[mid].fake_h # elif value_comp == 1: # value = node.direction_value # other_value = custom_list[mid].direction_value # elif value_comp == 2: # return mid # else: # hi = lo #value_comp = value_comp + 1 return int(mid)
997,610
e92dc3498786ace17fc6ffaae97fd6de8d82c759
import numpy as np import pandas as pd from datetime import datetime import matplotlib.pyplot as plt import seaborn as sns from google.cloud import language_v1 from google.cloud.language_v1 import enums from google.cloud import storage #Initialize Storage Client and download tweets csv file storage_client = storage.Client('interview-flighthub').from_service_account_json('/home/lwalls/documents/apikey.json') bucket = storage_client.get_bucket('fh-interview-speech-audio') blob = bucket.blob('flighthub_tweets_raw.csv') blob.download_to_filename('/home/lwalls/documents/natlang/flighthub_tweets_raw.csv') # Place Tweets in Df and clean #df = pd.read_csv('gs://fh-interview-speech-audio/flighthub_tweets_raw.csv') df = pd.read_csv('flighthub_tweets_raw.csv') df = df.drop(['replies', 'retweets', 'favorites', 'unix_timestamp', 'url', '__url'], axis=1) ## clean dates twitter_dates = df['date'] twitter_dates_converted = [] for d in twitter_dates: if '2018' in d: d = datetime.strptime(d, '%d %b %Y').date() d.strftime('%Y-%m-%d') twitter_dates_converted.append(d) else: d = datetime.strptime(d, '%b %d').date() d.strftime('%Y-%m-%d') d = d.replace(year=datetime.today().year) twitter_dates_converted.append(d) df['date'] = twitter_dates_converted ## sort by newest to oldest df = df.sort_values('date', ascending=False) # Sentiment Analysis def language_analysis(text_content): client = language_v1.LanguageServiceClient.from_service_account_json('/home/lwalls/documents/apikey.json') type_ = enums.Document.Type.PLAIN_TEXT language = 'en' document = {'content': text_content, 'type': type_, 'language': language} encoding_type = enums.EncodingType.UTF8 response_sentiment = client.analyze_sentiment(document, encoding_type=encoding_type) #sent_analysis = document.analyze_sentiment() # print(dir(response_sentiment)) sentiment = response_sentiment.document_sentiment #ent_analysis = document.analyze_entities() response_ent = client.analyze_entities(document, encoding_type=encoding_type) entities 
= response_ent.entities return sentiment, entities tweets = df['content'] sentiment_score = [] sentiment_magnitude = [] df_entities = pd.DataFrame() for tweet in tweets: # Sentiment sentiment, entities = language_analysis(tweet) sentiment_score.append(sentiment.score) sentiment_magnitude.append(sentiment.magnitude) # Entities entity_name = [] entity_type = [] entity_salience = [] df_ent_tmp = pd.DataFrame() for e in entities: entity_name.append(e.name) entity_type.append(enums.Entity.Type(e.type).name) entity_salience.append(e.salience) df_ent_tmp['name'] = entity_name df_ent_tmp['type'] = entity_type df_ent_tmp['salience'] = entity_salience df_ent_tmp['tweet'] = tweet df_entities = pd.concat([df_entities, df_ent_tmp], ignore_index=True) del df_ent_tmp df['sentiment_score'] = sentiment_score df['sentiment_magnitude'] = sentiment_magnitude print(df.head(10)) print(df_entities.head(10)) df.to_csv('flighthub_tweets_sentiment.csv') df_entities.to_csv('flighthub_tweets_entities.csv')
997,611
ba0bc1633f6097d74e8d006b9b626600239a001b
import math import random from collections import namedtuple, deque import gym import numpy as np import gym_minigrid import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F from torch.distributions.categorical import Categorical from IPython.display import clear_output import matplotlib.pyplot as plt from common.misc_utils import one_hot, Transition, ReplayMemory from common.multiprocessing_env import ParallelEnv # Hyper Parameters MINI_BATCH_SIZE = 256 LR = 1e-3 # learning rate GAMMA = 0.99 # reward discount TAU = 0.95 # average parameter PPO_EPOCHS = 4 MAX_FRAMES = 1e5 T_STEPS = 128 # steps per process before updating MAX_GRAD_NORM = 0.5 ENV_NAME = "MiniGrid-DoorKey-5x5-v0" #MiniGrid-Empty-5x5-v0 MiniGrid-DoorKey-5x5-v0 NUM_ENVS = 16 use_cuda = torch.cuda.is_available() if use_cuda: device = "cuda" torch.set_default_tensor_type('torch.cuda.FloatTensor') batch_num = 0 def init_params(m): if isinstance(m, nn.Linear): m.weight.data.normal_(0, 1) m.weight.data *= 1 / torch.sqrt(m.weight.data.pow(2).sum(1, keepdim=True)) if m.bias is not None: m.bias.data.fill_(0) class ActorCritic(nn.Module): def __init__(self, num_inputs, num_outputs): super().__init__() # image network self.image = nn.Sequential( nn.Conv2d(3, 16, (2, 2)), nn.ReLU(), nn.MaxPool2d((2, 2)), nn.Conv2d(16, 32, (2, 2)), nn.ReLU(), nn.Conv2d(32, 64, (2, 2)), nn.ReLU() ) # Define actor's model self.actor = nn.Sequential( nn.Linear(num_inputs, 64), nn.Tanh(), nn.Linear(64, num_outputs) ) # Define critic's model self.critic = nn.Sequential( nn.Linear(num_inputs, 64), nn.Tanh(), nn.Linear(64, 1) ) self.apply(init_params) def forward(self, image, data): image = image.transpose(1, 3).transpose(2, 3) x1 = self.image(image) x1 = x1.reshape(x1.shape[0], -1) x2 = data x = torch.cat((x1, x2), dim = 1) dist = self.actor(x) dist = Categorical(logits=F.log_softmax(dist, dim=1)) value = self.critic(x).squeeze(1) return dist, value # calculate GAE advantage def 
compute_gae(next_value, rewards, masks, values, gamma = 0.99, tau = 0.95): values = values + [next_value] gae = 0 returns = [] advantages = [] for step in reversed(range(len(rewards))): delta = rewards[step] + gamma * values[step + 1] * masks[step] - values[step] gae = delta + gamma * tau * masks[step] * gae returns.insert(0, gae + values[step]) advantages.insert(0, gae) return returns, advantages # iterate mini_batch used for optimization def get_ppo_batch_index(recurrence = 1): global batch_num batch_size = states_image.size(0) indexes = np.arange(0, batch_size, recurrence) indexes = np.random.permutation(indexes) # Shift starting indexes by self.recurrence//2 half the time if batch_num % 2 == 1: indexes = indexes[(indexes + recurrence) % T_STEPS != 0] indexes += recurrence // 2 batch_num += 1 num_indexes = MINI_BATCH_SIZE // recurrence batches_starting_indexes = [indexes[i:i+num_indexes] for i in range(0, len(indexes), num_indexes)] return batches_starting_indexes # update def ppo_update(model, optimizer, ppo_epochs, mini_batch_size, states_image, states_data, actions, log_probs, returns, advantages, clip_param=0.2): for _ in range(ppo_epochs): for indexs in get_ppo_batch_index(): states_image_ = states_image[indexs] states_data_ = states_data[indexs] action = actions[indexs] old_log_probs = log_probs[indexs] return_ = returns[indexs] advantage = advantages[indexs] dist, value = model(states_image_, states_data_) entropy = dist.entropy().mean() new_log_probs = dist.log_prob(action) ratio = (new_log_probs - old_log_probs).exp() surr1 = ratio * advantage surr2 = torch.clamp(ratio, 1.0 - clip_param, 1.0 + clip_param) * advantage actor_loss = - torch.min(surr1, surr2).mean() value_clipped = return_ - advantage + torch.clamp(value - (return_ - advantage), -clip_param, clip_param) surr1 = (value - return_).pow(2) surr2 = (value_clipped - return_).pow(2) critic_loss = torch.max(surr1, surr2).mean() loss = 0.5 * critic_loss + actor_loss - 0.01 * entropy 
optimizer.zero_grad() loss.backward() torch.nn.utils.clip_grad_norm_(model.parameters(), MAX_GRAD_NORM) optimizer.step() def state_process(state, goal_pos, current_pos): images = [] datas = [] for i in range(len(state)) : direction = state[i]['direction'] s = np.append(direction, goal_pos[i]) s = np.append(s, current_pos[i]) image = state[i]['image'] images.append(image) datas.append(s) return torch.Tensor(images), torch.Tensor(datas) # guard for multiprocessing if __name__ == "__main__": ##################################### def make_env(env_name, seed): env = gym.make(env_name) env.seed(seed) return env envs = [make_env(ENV_NAME, 1 + 10000*i) for i in range(NUM_ENVS)] envs = ParallelEnv(envs) print("Environments Loaded!\n") ######################################## env = gym.make(ENV_NAME) env.seed(0) n = env.observation_space["image"].shape[0] m = env.observation_space["image"].shape[1] image_embedding_size = ((n-1)//2-2)*((m-1)//2-2)*64 embedding_size = image_embedding_size + 2 + 2 + 1 act_shape = env.action_space.n ########################################### def test_env(acmodel, vis=False): state = env.reset() if vis: env.render() for grid in env.grid.grid: if grid is not None and grid.type == "goal": goal = grid.cur_pos done = False total_reward = 0 while not done: image = torch.Tensor(state['image']).unsqueeze(0) direction = state['direction'] data = np.append(direction, goal) data = np.append(data, env.agent_pos) data = torch.Tensor(data).unsqueeze(0) dist, _ = acmodel(image, data) next_state, reward, done, _ = env.step(dist.sample().cpu().numpy()) state = next_state if vis: env.render() total_reward += reward return total_reward ########################################### # GET GOAL POSE # for grid in env.grid.grid: # if grid is not None and grid.type == "goal": # goal_pose = grid.cur_pos model = ActorCritic(embedding_size, act_shape).to(device) optimizer = optim.Adam(model.parameters(), lr=LR) # state = env.reset() state = envs.reset() goal = 
envs.get_goal() frame_idx = 0 ep_reward = 0 avg_rw = deque(maxlen=30) ep_number = 0 while frame_idx < MAX_FRAMES: # variables to record experience log_probs = [] values = [] states_image = [] states_data = [] actions = [] rewards = [] masks = [] entropy = 0 # collect experience for _ in range(T_STEPS): image_processed, data_processed = state_process(state, goal, envs.agent_pose()) with torch.no_grad(): dist, value = model(image_processed, data_processed) action = dist.sample() next_state, reward, done, _ = envs.step(action.cpu().numpy()) goal = envs.get_goal() log_prob = dist.log_prob(action) entropy += dist.entropy().mean() log_probs.append(log_prob) # log_probs.append(log_prob) values.append(value) rewards.append(torch.Tensor(reward)) masks.append(torch.Tensor(1-np.stack(done))) states_image.append(image_processed) states_data.append(data_processed) actions.append(action) # actions.append(action) state = next_state frame_idx += T_STEPS * NUM_ENVS test_reward = np.mean([test_env(model) for _ in range(10)]) print(f'frames:{frame_idx}\tavg_rw:{test_reward}') with torch.no_grad(): next_image_processed, next_data_processed = state_process(next_state, goal, envs.agent_pose()) _, next_value = model(next_image_processed, next_data_processed) returns, advantages = compute_gae(next_value, rewards, masks, values, GAMMA, TAU) returns = torch.cat(returns).detach() log_probs = torch.cat(log_probs).detach() values = torch.cat(values).detach() states_image = torch.cat(states_image) states_data = torch.cat(states_data) actions = torch.cat(actions) advantages = torch.cat(advantages) # check shape of each attributes of experience batch # !!! Important to keep consistant order for all attributes # print('---') # print(log_probs.shape) # print(values.shape) # print(returns.shape) # print(advantages.shape) # print(actions.shape) # print('---') ppo_update(model, optimizer, PPO_EPOCHS, MINI_BATCH_SIZE, states_image, states_data, actions, log_probs, returns, advantages)
997,612
daa21e83ae8358e4492542d923c7ea815b247721
from xai.brain.wordbase.adjectives._stocky import _STOCKY #calss header class _STOCKIEST(_STOCKY, ): def __init__(self,): _STOCKY.__init__(self) self.name = "STOCKIEST" self.specie = 'adjectives' self.basic = "stocky" self.jsondata = {}
997,613
334e7733cc2e88c3184694d5f623572f2d7aefac
# -*- coding: utf-8 -*- # Generated by Django 1.11.1 on 2017-07-03 11:57 from __future__ import unicode_literals import ckeditor_uploader.fields from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('pages', '0004_lunchsteps_title'), ] operations = [ migrations.AlterField( model_name='lunchsteps', name='content', field=ckeditor_uploader.fields.RichTextUploadingField(), ), ]
997,614
54989e91287ed43f72b0f2804c98c7217dc4e7e5
def is_in_request_parameters(request, param_name): result = False try: if not request: raise Exception('Request object not provided') if not param_name: raise Exception('Parameter name not provided') if request.method == 'GET': if param_name in request.GET: result = True elif request.method == 'POST': if param_name in request.POST: result = True else: raise Exception('Unsupported request method : %s' % request.method) except Exception, e: return None, 'Error checking request parameters : %s' % str(e) else: return result, None def get_request_parameter_values(request, param_name_list): return_dict = {} try: if not request: raise Exception('Request object not provided') if not param_name_list: raise Exception('Parameter names not provided') if request.method == 'GET': for param_name in param_name_list: if param_name in request.GET: return_dict[param_name] = request.GET.get(param_name) elif request.method == 'POST': for param_name in param_name_list: if param_name in request.POST: return_dict[param_name] = request.POST.get(param_name) else: raise Exception('Unsupported request method : %s' % request.method) except Exception, e: return None, 'Error checking request parameters : %s' % str(e) else: return return_dict, None # vim: tabstop=8 softtabstop=0 expandtab ai shiftwidth=4 smarttab
997,615
c960764e12f2651019d4cb896b88856a76b81ff8
#!/usr/bin/env python # -*- coding: utf-8 -*- import json import urllib from xml.etree import ElementTree as ET from .define import * import time def parse_xml(web_data): if len(web_data) == 0: return None xmlData = ET.fromstring(web_data) msg_type = xmlData.find('MsgType').text if msg_type == 'text': return TextMsg(xmlData) elif msg_type == 'image': return ImageMsg(xmlData) class Msg(object): def __init__(self, xmlData): self.ToUserName = xmlData.find('ToUserName').text self.FromUserName = xmlData.find('FromUserName').text self.CreateTime = xmlData.find('CreateTime').text self.MsgType = xmlData.find('MsgType').text self.MsgId = xmlData.find('MsgId').text class TextMsg(Msg): def __init__(self, xmlData): Msg.__init__(self, xmlData) self.Content = xmlData.find('Content').text.encode("utf-8") class ImageMsg(Msg): def __init__(self, xmlData): Msg.__init__(self, xmlData) self.PicUrl = xmlData.find('PicUrl').text self.MediaId = xmlData.find('MediaId').text class ReplyMsg(object): def send(self): return "success" class ReplyTextMsg(ReplyMsg): def __init__(self, toUserName, fromUserName, content): self.__dict = dict() self.__dict['ToUserName'] = toUserName self.__dict['FromUserName'] = fromUserName self.__dict['CreateTime'] = int(time.time()) self.__dict['Content'] = content def send(self): XmlForm = """ <xml> <ToUserName><![CDATA[{ToUserName}]]></ToUserName> <FromUserName><![CDATA[{FromUserName}]]></FromUserName> <CreateTime>{CreateTime}</CreateTime> <MsgType><![CDATA[text]]></MsgType> <Content><![CDATA[{Content}]]></Content> </xml> """ return XmlForm.format(**self.__dict) class ReplyImageMsg(ReplyMsg): def __init__(self, toUserName, fromUserName, mediaId): self.__dict = dict() self.__dict['ToUserName'] = toUserName self.__dict['FromUserName'] = fromUserName self.__dict['CreateTime'] = int(time.time()) self.__dict['MediaId'] = mediaId def send(self): XmlForm = """ <xml> <ToUserName><![CDATA[{ToUserName}]]></ToUserName> 
<FromUserName><![CDATA[{FromUserName}]]></FromUserName> <CreateTime>{CreateTime}</CreateTime> <MsgType><![CDATA[image]]></MsgType> <Image> <MediaId><![CDATA[{MediaId}]]></MediaId> </Image> </xml> """ return XmlForm.format(**self.__dict) class WechatToken(object): def __init__(self): self.__accessToken = '' self.__leftTime = 0 def __real_get_access_token(self): appId = g_wechat_id appSecret = g_wechat_secret postUrl = ("https://api.weixin.qq.com/cgi-bin/token?grant_type=" "client_credential&appid=%s&secret=%s" % (appId, appSecret)) urlResp = urllib.urlopen(postUrl) urlResp = json.loads(urlResp.read()) self.__accessToken = urlResp['access_token'] self.__leftTime = urlResp['expires_in'] def get_access_token(self): if self.__leftTime < 10: self.__real_get_access_token() return self.__accessToken class WechatMenu(object): def create(self, postData, accessToken): postUrl = "https://api.weixin.qq.com/cgi-bin/menu/create?access_token=%s" % accessToken if isinstance(postData, unicode): postData = postData.encode('utf-8') urlResp = urllib.urlopen(url=postUrl, data=postData) print urlResp.read() def query(self, accessToken): postUrl = "https://api.weixin.qq.com/cgi-bin/menu/get?access_token=%s" % accessToken urlResp = urllib.urlopen(url=postUrl) print urlResp.read() def delete(self, accessToken): postUrl = "https://api.weixin.qq.com/cgi-bin/menu/delete?access_token=%s" % accessToken urlResp = urllib.urlopen(url=postUrl) print urlResp.read() # 获取自定义菜单配置接口 def get_current_selfmenu_info(self, accessToken): postUrl = "https://api.weixin.qq.com/cgi-bin/get_current_selfmenu_info?access_token=%s" % accessToken urlResp = urllib.urlopen(url=postUrl) print urlResp.read()
997,616
7989156af279028f5973b1b04d3c3ca738751f34
import json import sys class Scenario: """ A class used to represent an attack scenario Attributes ---------- probabilty : float probability of scenario occurring, 0-1 nodeKeys : [str] list of node keys, each key represented by a string Methods ------- combine(scenario2) Combines scenario2 with self """ def __init__(self, probability, nodeKeys): """ Initializes a scenario with the given probability and nodeKeys. Parameters ---------- probability : float? The scenario's probability of occurring nodeKeys : [str] The list of nodes in the scenario """ self.probability = float(probability) self.nodeKeys = nodeKeys ''' def __str__(self): """Returns nodes in scenario""" # TODO: Check output with deeper tree risk = str(round(float(treeRoot["impact"]) * self.probability, 4)) prob = str(round(self.probability, 4)) output = "risk: " + risk + " \tprob: " + prob + "\t: " for key in self.nodeKeys: output += key + " " return output ''' def toDict(self,treeRoot,nodesList): """Returns dictionary representation of scenario""" risk = round(float(treeRoot["impact"]) * self.probability, 4) prob = round(self.probability, 4) leafKeys = self.nodeKeys keyTextPairs = [] for key in leafKeys: node = findNode(key,nodesList) keyTextPairs.append([key, node["text"]]) dit = { "risk" : risk, "probability" : prob, "leafKeys" : keyTextPairs } return dit def combine(self, scenario2): """ Combine scenarios. Used in AND nodes. 
Parameters ---------- scenario2 : Scenario The scenario to combine """ prob = self.probability * scenario2.probability keys = list(()) for key in self.nodeKeys: keys.append(key) for key in scenario2.nodeKeys: keys.append(key) return Scenario(prob, keys) def normalize(nodesList): """ Normalizes probabilty of attacks in nodeList to sum to 1 """ sum = 0.0 for node in nodesList: if node["key"][0] == "L" or node["key"][0] == "S": sum += float(node["probability"]) for node in nodesList: if node["key"][0] == "L": node["probability"] = float(node["probability"]) / sum def findRoot(nodesList,edgesList): """Find root node.""" root = None for n in nodesList: hasParent = False # Check if node exists as a destination in edge list for e in edgesList: if e["to"] == n["key"]: hasParent = True break if not hasParent: root = n break if root is None: print("Error:: Cannot find root node") return root def findAttackRoot(root,edgesList,nodesList): """ Find root node of attack tree (that does not include safe path node). Parameters ---------- root : Node (list) Root of tree """ children = findChildren(root,edgesList,nodesList) for node in children: if node["key"][0] != "S": return node print("Error:: Cannot find attack root node") return root def findNode(key,nodesList): """ Finds the node with the given key. Parameters ---------- key : str Key of node to find """ for node in nodesList: if node["key"] == key: return node print("Error:: Could not find node with given key") def findChildren(node,edgesList,nodesList): """ Searches edge list to find all children of given node. Parameters ---------- node : Node Node to find children of """ children = list(()) for e in edgesList: if e["from"] == node["key"]: children.append(findNode(e["to"],nodesList)) return children def findScenarios(node,edgesList,nodesList): """ Recusive function for finding all scenarios from a given node. 
Parameters ---------- node : Node Node to find scenarios of """ if node["key"][0] == "L": # If leaf node scenarioList = list(()) scenarioList.append(Scenario(node["probability"], [node["key"]])) return scenarioList elif node["key"][0] == "O": # If OR node scenarioList = list(()) children = findChildren(node,edgesList,nodesList) for child in children: childScenarios = findScenarios(child,edgesList,nodesList) for scenario in childScenarios: scenarioList.append(scenario) return scenarioList elif node["key"][0] == "A": # If AND node scenarioList = list(()) tempList = list(()) childLists = list(()) # List of lists children = findChildren(node,edgesList,nodesList) for child in children: # Create list of child scenario lists childLists.append(findScenarios(child,edgesList,nodesList)) scenarioList = childLists[0] for i in range(1, len(childLists)): # Compare all combinations of scenarios for scenario1 in scenarioList: for scenario2 in childLists[i]: tempList.append(scenario1.combine(scenario2)) scenarioList = tempList tempList = list(()) return scenarioList else: print("Error:: Could not determine node type") # Get object from JSON to List jsonObj = """ { "nodeData": [ { "key": "OR3", "text": "placeholderText", "riskIndex": "0", "impact": "450", "color": "red", "shape": "andgate" }, { "key": "LEAF5", "text": "safePath", "riskIndex": "8" }, { "key": "AND", "text": "placeholderText", "riskIndex": "0", "color": "red", "shape": "andgate" }, { "key": "OR", "text": "placeholderText", "riskIndex": "0", "color": "lightgreen", "shape": "orgate" }, { "key": "LEAF", "text": "cyberAttack", "riskIndex": "1.1" }, { "key": "LEAF2", "text": "physicalAttack", "riskIndex": "8.3" }, { "key": "OR2", "text": "placeholderText", "riskIndex": "0", "color": "lightgreen", "shape": "orgate" }, { "key": "LEAF3", "text": "phishingAttack", "riskIndex": "7.2" }, { "key": "LEAF4", "text": "spyware", "riskIndex": "3.4" } ], "edgeData": [ { "from": "OR3", "to": "AND", "fromPort": "b", "toPort": "t", "key": 
-7 }, { "from": "OR3", "to": "LEAF5", "fromPort": "b", "toPort": "t", "key": -8 }, { "from": "AND", "to": "OR", "fromPort": "b", "toPort": "t", "key": -1 }, { "from": "OR", "to": "LEAF", "fromPort": "b", "toPort": "t", "key": -2 }, { "from": "OR", "to": "LEAF2", "fromPort": "b", "toPort": "t", "key": -3 }, { "from": "AND", "to": "OR2", "fromPort": "b", "toPort": "t", "key": -4 }, { "from": "OR2", "to": "LEAF3", "fromPort": "b", "toPort": "t", "key": -5 }, { "from": "OR2", "to": "LEAF4", "fromPort": "b", "toPort": "t", "key": -6 } ] } """ # jsonData = json.loads(jsonObj) # nodesList = jsonData["nodeData"] # edgesList = jsonData["edgeData"] # normalize() # treeRoot = findRoot() # attackRoot = findAttackRoot(treeRoot) # scenarios = findScenarios(attackRoot) # scenList = [] # for scen in scenarios: # scenList.append(scen.toDict()) # sendToFrontendJson = json.dumps(scenList) # print(sendToFrontendJson) def api_request(frontend_json): jsonData = json.loads(frontend_json) nodesList = jsonData["nodeData"] edgesList = jsonData["edgeData"] normalize(nodesList) treeRoot = findRoot(nodesList,edgesList) attackRoot = findAttackRoot(treeRoot,edgesList,nodesList) scenarios = findScenarios(attackRoot,edgesList,nodesList) scenList = [] if(scenarios != None): for scen in scenarios: scenList.append(scen.toDict(treeRoot,nodesList)) sendToFrontendJson = json.dumps(scenList) return sendToFrontendJson
997,617
93d06de5390dbb5d40342f7434724a2b50fbbadf
from django.db import models from foodpool.v1.core.models import TimestampedModel, CanadaAddressModel, AvailabilityModel class MenuManager(models.Manager): def create(self, **required_fields): menu = self.model(**required_fields) menu.save() return menu class Menu(TimestampedModel): skip_id = models.CharField(max_length=36, unique=True) class MenuGroups(TimestampedModel): name = models.TextField() menu = models.ForeignKey(Menu, on_delete=models.PROTECT) class MenuGroupAvailability(AvailabilityModel, TimestampedModel): group = models.ForeignKey(MenuGroups, on_delete=models.PROTECT) class MenuItems(TimestampedModel): objects = MenuManager() name = models.TextField() description = models.TextField() group = models.ForeignKey(MenuGroups, on_delete=models.PROTECT) calories = models.IntegerField(null=True) price = models.IntegerField() available = models.BooleanField(default=True) class MenuOptions(TimestampedModel): menu_item = models.ForeignKey(MenuItems, on_delete=models.CASCADE) name = models.TextField() description = models.TextField() minimum = models.IntegerField() maximum = models.IntegerField() available = models.BooleanField(default=True) class Meta(TimestampedModel.Meta): constraints = [ models.CheckConstraint(check=models.Q(minimum__lte=models.F("maximum")), name="optionConstraint") ] class MenuOptionChoices(TimestampedModel): menu_option = models.ForeignKey(MenuOptions, on_delete=models.CASCADE) choice = models.TextField() price = models.IntegerField() calories = models.IntegerField(null=True) minimum = models.IntegerField() maximum = models.IntegerField() available = models.BooleanField(default=True) class Meta(TimestampedModel.Meta): constraints = [ models.CheckConstraint(check=models.Q(minimum__lte=models.F("maximum")), name="choiceConstraint") ] class Restaurant(TimestampedModel): name = models.TextField() short_name = models.TextField() description = models.TextField(null=True) class RestaurantLocation(CanadaAddressModel, TimestampedModel): restaurant = 
models.ForeignKey(Restaurant, on_delete=models.CASCADE, related_name='location') is_open = models.BooleanField(default=False) menu = models.ForeignKey(Menu, on_delete=models.PROTECT, related_name='location') class RestaurantLocationAvailability(AvailabilityModel): restaurant_location = models.ForeignKey(RestaurantLocation, on_delete=models.PROTECT)
997,618
9d18c4c5b8ee4edd580302b889440d2b806aa4e3
from db import db from datetime import datetime class Address(db.Model): id = db.Column(db.Integer, primary_key=True) firstName = db.Column(db.String(80), nullable=False) lastName = db.Column(db.String(80), nullable=False) city = db.Column(db.String(80), nullable=False) address = db.Column(db.String(80), nullable=False) zip = db.Column(db.String(10), nullable=False) state = db.Column(db.String(20), nullable=False) country = db.Column(db.String(20), nullable=False) createdAt = db.Column(db.DateTime, default=datetime.utcnow()) user_id = db.Column(db.Integer, db.ForeignKey('user.id')) def __repr__(self): return f"Address<f{self.user_id}, {self.id}>" @classmethod def find_by_id(cls, id): return Address.query.filter(Address.id == id).first() def save_to_db(self): db.session.add(self) db.session.commit() def delete_from_db(self): db.session.delete(self) db.session.commit()
997,619
1ec803224b1356341a84dd42b566718775f2bb47
from get_partial_match import get_partial_match def kmp_search(full_str, sub_str): """finds all beginning index of sub_str occurred in full_str search faster by skipping already known matched positions runs in O(n) """ n, m = len(full_str), len(sub_str) result = [] pi = get_partial_match(sub_str) begin, matched = 0, 0 while begin <= (n - m): if matched < m and full_str[begin + matched] == sub_str[matched]: matched += 1 if matched == m: result.append(begin) else: if matched == 0: begin += 1 else: begin += (matched - pi[matched - 1]) matched = pi[matched - 1] return result
997,620
7e92f962049dc397d6171770405644fb8ea5b292
from unittest import TestCase

import mock

from robust_urls.utils import locale_context, try_url_for_language
from django.core.urlresolvers import Resolver404


class LocaleContextTestCase(TestCase):
    """locale_context must activate the locale for the block and deactivate after."""

    @mock.patch('robust_urls.utils.translation')
    def testWillCallTranslationActivateBeforeCallingBlock(self, translation):
        with locale_context('pl_PL'):
            # already activated while the block runs...
            translation.activate.assert_called_once_with('pl_PL')
        # ...and not activated a second time on exit.
        translation.activate.assert_called_once_with('pl_PL')

    @mock.patch('robust_urls.utils.translation')
    def testWillCallTranslationDeactivateAfterCallingBlock(self, translation):
        with locale_context('pl_PL'):
            translation.deactivate.assert_not_called()
        translation.deactivate.assert_called_once()

    @mock.patch('robust_urls.utils.translation')
    def testWillCallWhateverIsInTheBlock(self, translation):
        fun = mock.Mock()
        with locale_context('anything'):
            fun()
        fun.assert_called_once()


class TryUrlForLanguageTestCase(TestCase):
    """try_url_for_language resolves inside the locale and maps Resolver404 to None."""

    @mock.patch('robust_urls.utils.locale_context')
    def testWillCallResolverInsideLocaleContext(self, locale_context):
        resolver = mock.Mock()
        try_url_for_language('/some/path/', 'pl_PL', resolver)
        resolver.resolve.assert_called_once_with('/some/path/')
        locale_context.assert_called_with('pl_PL')

    @mock.patch('robust_urls.utils.locale_context')
    def testWillReturnWhateverResolverReturns(self, locale_context):
        resolver = mock.Mock()
        resolver.resolve.return_value = 'hello'
        result = try_url_for_language('/some/path', 'pl_PL', resolver)
        # assertEqual: assertEquals is a deprecated alias, removed in Python 3.12.
        self.assertEqual('hello', result)

    @mock.patch('robust_urls.utils.locale_context')
    def testWillReturnNoneIfResolverRaises(self, locale_context):
        resolver = mock.Mock()
        resolver.resolve.side_effect = Resolver404
        result = try_url_for_language('/any/', 'pl_PL', resolver)
        self.assertIsNone(result)
997,621
05a9cbad06470ae56f81d99ab8fd91303b5e8daf
from flask import render_template, flash, redirect, url_for

from ...utils.flash_errors import flash_errors
from ...utils.zelda_modelo import ZeldaModelo


class FuncionalidadeListarNegocio:
    """Business rule: list all funcionalidades and render the listing page."""

    @staticmethod
    def exibir():
        # `exibir` was declared without `self` and without @staticmethod, so it
        # only worked when called on the class and raised TypeError on an
        # instance. @staticmethod makes the intent explicit and both call
        # forms valid; class-level callers are unaffected.
        funcionalidades = ZeldaModelo.lista_funcionalidades()
        return render_template('funcionalidade_listar.html', funcionalidades=funcionalidades)
997,622
bea1cdedec2b076888e99216e137886bd66da3f1
__author__ = 'liu.xingjie'

from collections import deque


# BFS over one-letter word mutations (LeetCode "Word Ladder").
class Solution:
    # @param beginWord, a string
    # @param endWord, a string
    # @param wordDict, a set of string  !!!wordDict is a set type!!!
    # @return an integer
    def ladderLength(self, beginWord, endWord, wordDict):
        """Length of the shortest transformation sequence from beginWord to
        endWord (counting both endpoints), changing one letter at a time with
        every intermediate word in wordDict. Returns 0 when unreachable.
        """
        wordDict.add(endWord)
        alphabet = 'abcdefghijklmnopqrstuvwxyz'
        # deque gives O(1) popleft; the old list.pop(0) was O(n) per dequeue.
        queue = deque()
        queue.append((beginWord, 1))
        while queue:
            word, length = queue.popleft()
            if word == endWord:
                return length
            for i in range(len(beginWord)):
                head, tail = word[:i], word[i + 1:]
                for ch in alphabet:
                    if word[i] != ch:
                        candidate = head + ch + tail
                        if candidate in wordDict:
                            queue.append((candidate, length + 1))
                            # remove so the word is never enqueued twice
                            wordDict.remove(candidate)
        return 0


s = Solution()
test = ['hot', 'dot', 'dog', 'lot', 'log']
d = set(test)
res = s.ladderLength('hit', 'cog', d)
# print(res) behaves identically on Python 2 and 3 for a single argument;
# the old `print res` was a SyntaxError on Python 3.
print(res)
997,623
bf1751628a0df1e07e43a8a5ee350d96acad3191
import os

# Greeting script: emits a fixed hello message when run.
message = "Hello from aayushi, in 2019"
print(message)
997,624
f3023b7368fab2870f9db9122bf0038f446622cf
CMD = """ SELECT a1.*, a2.s_dq_adjfactor FROM (SELECT * FROM AShareEODDerivativeIndicator WHERE trade_dt = '{date}') AS a1 INNER JOIN (SELECT * FROM AShareEODPrices WHERE trade_dt = '{date}') AS a2 ON (a1.s_info_windcode = a2.s_info_windcode) WHERE LEFT(a1.s_info_windcode, 2) IN ('60', '00', '30') """ cols = range(4, 25) dnames = ['total_cap', 'float_cap', 'high_52w', 'low_52w', 'PE', 'PB', 'PE_TTM', 'POCF', 'POCF_TTM', 'PCF', 'PCF_TTM', 'PS', 'PS_TTM', 'turnover', 'free_turnover', 'total_shares', 'float_shares', 'close', 'PD', 'adj_high_52w', 'adj_low_52w']
997,625
10c3a1f6933583f9e4b90b11148881f93e30d866
from collections import deque

# Read an m-wide, n-tall grid of 'W'/'B' cells, find every 4-connected
# same-letter region with BFS, and add the squared region size to that
# letter's score. Prints the 'W' score then the 'B' score.
m, n = map(int, input().split())
mat = [list(input()) for _ in range(n)]
check = [[False] * m for _ in range(n)]
w, b = 0, 0
MOVES = ((-1, 0), (1, 0), (0, -1), (0, 1))


def bfs(sr, sc):
    """Return the size of the same-letter region containing (sr, sc)."""
    queue = deque([(sr, sc)])
    check[sr][sc] = True
    size = 1
    while queue:
        r, c = queue.popleft()
        for dr, dc in MOVES:
            nr, nc = r + dr, c + dc
            if 0 <= nr < n and 0 <= nc < m:
                # unvisited neighbour of the same colour joins the region
                if not check[nr][nc] and mat[nr][nc] == mat[r][c]:
                    check[nr][nc] = True
                    queue.append((nr, nc))
                    size += 1
    return size


for r in range(n):
    for c in range(m):
        if not check[r][c]:
            region = bfs(r, c)
            if mat[r][c] == 'W':
                w += region ** 2
            else:
                b += region ** 2

print(w, b)
997,626
bf198836faf4716e9d118ca08e7f983808a7948d
from django.db import models
from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
from django.contrib.contenttypes.models import ContentType

# Create your models here.
#
# Fix applied throughout: every __str__ previously built its text with `+`
# concatenation, which raises TypeError whenever a non-string field
# (IntegerField, FloatField, or a ForeignKey instance) is involved. They now
# use f-strings, which stringify every field safely. A few __str__ bodies
# also referenced attributes their model does not declare (self.username vs
# userName and vice versa); those now use the declared field names.


class Client(models.Model):
    """A registered user who can buy, sell and collect items."""
    userid = models.IntegerField(primary_key=True)
    username = models.CharField(max_length=50, unique=True)
    password = models.CharField(max_length=250)
    phonenumber = models.CharField(max_length=10)
    name = models.CharField(max_length=50)
    # NOTE(review): cFlag/sFlag meanings are not visible in this file — confirm.
    cFlag = models.IntegerField()
    sFlag = models.IntegerField()
    website = models.CharField(max_length=50, blank=True)

    class Meta:
        unique_together = (('userid', 'username'))

    #def __str__(self):
    #    return self.userid + " " + self.username + " " + self.password + " " + self.phonenumber + " " + self.name + " " + self.cFlag + " " + self.sFlag + " " + self.website


class Collectible(models.Model):
    """Base row for every collectible kind, addressable via contenttypes."""
    collectible_object_id = models.IntegerField()
    collectible_content_type = models.ForeignKey(ContentType, on_delete=models.PROTECT)
    collectibleID = GenericForeignKey('collectible_content_type', 'collectible_object_id')
    name = models.CharField(max_length=50, default="")
    type = models.CharField(max_length=20, default="")
    year = models.IntegerField(default=1900)

    def __str__(self):
        return f"{self.name} {self.type} {self.year}"


class Album(Collectible):
    artist = models.CharField(max_length=50)

    def __str__(self):
        return f"{self.name} by {self.artist} - {self.type} {self.year}"


class AlbumGenre(models.Model):
    albumID = models.ForeignKey(
        Album, related_name='genre', on_delete=models.CASCADE, default=-1)
    genre = models.CharField(max_length=20)

    def __str__(self):
        return self.genre


class ComicBook(Collectible):
    author = models.CharField(max_length=50)
    illustrator = models.CharField(max_length=50)

    def __str__(self):
        return f"{self.name} written by {self.author} illustrated by {self.illustrator} - {self.type} {self.year}"


class ComicGenre(models.Model):
    comicID = models.ForeignKey(
        ComicBook, related_name='genre', on_delete=models.CASCADE, default=-1)
    genre = models.CharField(max_length=20)

    #def __str__(self):
    #    return self.comicID + " " + self.genre


class SportCard(Collectible):
    sport = models.CharField(max_length=20)

    def __str__(self):
        return f"{self.name} {self.sport} {self.type} {self.year}"


class Custom(Collectible):
    """A user-defined collectible kind with a free-form description."""
    description = models.CharField(max_length=250)

    def __str__(self):
        return f"{self.name} {self.type} {self.year} {self.description}"


class Order(models.Model):
    orderId = models.IntegerField(primary_key=True)
    sourceAddress = models.CharField(max_length=50)
    totalValue = models.IntegerField()
    userID = models.IntegerField()
    userName = models.CharField(max_length=50)

    def __str__(self):
        return f"{self.orderId} {self.sourceAddress} {self.totalValue} {self.userID} {self.userName}"


class Fulfills(models.Model):
    """Which client fulfills which order, and at what shipping cost."""
    userID = models.ForeignKey(
        Client, related_name='userIDFullfills', on_delete=models.CASCADE, default=-1)
    userName = models.ForeignKey(
        Client, related_name='usernameFullfills', on_delete=models.CASCADE, default=-1)
    orderID = models.ForeignKey(
        Order, related_name='orderIDFullfills', on_delete=models.CASCADE, default=-1)
    shippingCost = models.FloatField()

    class Meta:
        unique_together = (('userID', 'userName', 'orderID'))

    def __str__(self):
        return f"{self.userID} {self.userName} {self.orderID} {self.shippingCost}"


class Payment(models.Model):
    paymentNo = models.IntegerField(primary_key=True)
    totalValue = models.FloatField()
    formOfPayment = models.CharField(max_length=50)
    orderID = models.ForeignKey(
        Order, related_name='orderIDPayment', on_delete=models.CASCADE, default=-1)

    def __str__(self):
        return f"{self.paymentNo} {self.totalValue} {self.formOfPayment} {self.orderID}"


class Admin(models.Model):
    userName = models.CharField(max_length=50)
    password = models.CharField(max_length=50)

    def __str__(self):
        return f"{self.userName} {self.password}"


class Deals_With(models.Model):
    """Which admin handles which order."""
    orderID = models.ForeignKey(
        Order, related_name='orderIDDealsWith', on_delete=models.CASCADE, default=-1)
    adminUsername = models.ForeignKey(
        Admin, related_name='adminUsernameDealsWith', on_delete=models.CASCADE, default=-1)

    class Meta:
        unique_together = (('orderID', 'adminUsername'))

    def __str__(self):
        return f"{self.adminUsername} {self.orderID}"


class Manages(models.Model):
    # NOTE(review): GenericForeignKey('model', 'id') points at the row's own
    # primary key instead of a dedicated object-id field — this generic link
    # cannot address another model's row as written. Schema left unchanged
    # to preserve migrations; confirm intent. Same pattern appears in
    # Made_Of, Sells and Consists_Of below.
    model = models.ForeignKey(ContentType, on_delete=models.CASCADE, default=None)
    id = GenericForeignKey('model', 'id')
    adminUsername = models.ForeignKey(
        Admin, related_name='adminUsernameManages', on_delete=models.CASCADE, default=-1)

    class Meta:
        unique_together = (('model', 'adminUsername'))

    def __str__(self):
        return f"{self.id} {self.adminUsername}"


class Collection(models.Model):
    collection_name = models.CharField(primary_key=True, max_length=50)

    def __str__(self):
        return self.collection_name


class Forms(models.Model):
    """Links collectibles into a named collection."""
    collectible_id = GenericRelation(Collectible)
    collection_name = models.ForeignKey(
        Collection, related_name='collectionNameForms', on_delete=models.CASCADE, default=-1)

    def __str__(self):
        return f"{self.id} {self.collection_name}"


class Made_Of(models.Model):
    """Which collectibles make up an order."""
    model = models.ForeignKey(ContentType, on_delete=models.CASCADE, default=None)
    id = GenericForeignKey('model', 'id')
    order_id = models.ForeignKey(
        Order, related_name='orderIdMadeOf', on_delete=models.CASCADE, default=-1)

    class Meta:
        unique_together = (('model', 'order_id'))

    def __str__(self):
        return f"{self.id} {self.order_id}"


class Wants(models.Model):
    """A client's wishlist entry for a collection."""
    userID = models.ForeignKey(
        Client, related_name='userIDwants', on_delete=models.CASCADE, default=-1)
    userName = models.ForeignKey(
        Client, related_name='usernameWants', on_delete=models.CASCADE, default=-1)
    collectionName = models.ForeignKey(
        Collection, related_name='collectionNameWants', on_delete=models.CASCADE, default=-1)

    class Meta:
        unique_together = (('userID', 'userName', 'collectionName'))

    def __str__(self):
        # field is userName (the old body read the nonexistent self.username)
        return f"{self.userID} {self.userName} {self.collectionName}"


class Sells(models.Model):
    """A client's listing of one collectible at a price."""
    userID = models.ForeignKey(
        Client, related_name='userIDSells', on_delete=models.CASCADE, default=-1)
    username = models.ForeignKey(
        Client, related_name='usernameSells', on_delete=models.CASCADE, default=-1)
    model = models.ForeignKey(ContentType, on_delete=models.CASCADE, default=None)
    id = GenericForeignKey('model', 'id')
    price = models.FloatField()

    class Meta:
        unique_together = (('userID', 'model', 'username'))

    def __str__(self):
        # field is username (the old body read the nonexistent self.userName)
        return f"{self.userID} {self.username} {self.id} {self.price}"


class UserCollection(models.Model):
    userName = models.ForeignKey(
        Client, related_name='usernameUserCollection', on_delete=models.CASCADE, default=-1)
    userID = models.ForeignKey(
        Client, related_name='userIDUserCollection', on_delete=models.CASCADE, default=-1)
    collectionName = models.CharField(max_length=50)

    class Meta:
        unique_together = (('userID', 'userName', 'collectionName'))

    def __str__(self):
        # field is userName (the old body read the nonexistent self.username)
        return f"{self.userID} {self.userName} {self.collectionName}"


class Consists_Of(models.Model):
    """Which collectibles sit inside a user's collection."""
    userID = models.ForeignKey(
        UserCollection, related_name='userIDConsists_Of', on_delete=models.CASCADE, default=-1)
    model = models.ForeignKey(ContentType, on_delete=models.CASCADE, default=None)
    id = GenericForeignKey('model', 'id')
    collectionName = models.ForeignKey(
        UserCollection, related_name='collectionNameConsists_Of', on_delete=models.CASCADE, default=-1)

    class Meta:
        unique_together = (('userID', 'model', 'collectionName'))

    def __str__(self):
        return f"{self.userID} {self.id} {self.collectionName}"


class Moderates(models.Model):
    """Which admin moderates which client."""
    userID = models.ForeignKey(
        Client, related_name='userIDModerates', on_delete=models.CASCADE, default=-1)
    username = models.ForeignKey(
        Client, related_name='usernameModerates', on_delete=models.CASCADE, default=-1)
    adminUsername = models.ForeignKey(
        Admin, related_name='adminUsernameModerates', on_delete=models.CASCADE, default=-1)

    class Meta:
        unique_together = (('userID', 'username', 'adminUsername'))

    def __str__(self):
        return f"{self.userID} {self.username} {self.adminUsername}"


class Warehouse(models.Model):
    address = models.CharField(max_length=50)
    warehouseNumber = models.IntegerField(default=-1, unique=True)
    username = models.ForeignKey(
        Client, related_name='usernameWarehouse', on_delete=models.CASCADE, unique=False, default=-1)
    userID = models.ForeignKey(
        Client, related_name='userIDWarehouse', unique=False, on_delete=models.CASCADE, default=-1)

    def __str__(self):
        return f"{self.address} {self.warehouseNumber} {self.username} {self.userID}"


class Shipping_Method(models.Model):
    username = models.ForeignKey(
        Client, related_name='usernameShipping_Method', on_delete=models.CASCADE, default=-1)
    userID = models.ForeignKey(
        Client, related_name='userIDShipping_Method', on_delete=models.CASCADE, default=-1)
    shippingMethod = models.CharField(max_length=50)

    class Meta:
        unique_together = (('userID', 'username'))

    def __str__(self):
        return f"{self.userID} {self.username} {self.shippingMethod}"
997,627
13017394df56d2d97e3388605359b89267a8faaf
import pandas as pd
import sqlite3
import os
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import numpy as np

# Local folder holding the supplementary material of article
# 41467_2018_7746 (hard-coded Windows path).
dirname = "D:/Users/mgcha/Downloads"
# fpaths = [os.path.join(dirname, "41467_2018_7746_MOESM7_ESM.txt")]

con = sqlite3.connect(os.path.join(dirname, "41467_2018_7746_MOESM.db"))

# One-off ingestion step (kept commented out): split the 'SNP(hg19)' column
# into chromosome/start, add end = start + 1, move the score column last,
# and store each table in the SQLite file above.
# for fpath in fpaths:
#     df = pd.read_csv(fpath, sep='\t')
#     loc = df['SNP(hg19)'].str.split(':', expand=True)
#     loc.columns = ['chromosome', 'start']
#     df = pd.concat([df, loc], axis=1)
#     df = df.drop(['SNP(hg19)'], axis=1)
#     df['start'] = df['start'].astype(int)
#     df['end'] = df['start'] + 1
#     columns = list(df.columns)
#     score = columns.pop(0)
#     columns.append(score)
#     df = df[columns]
#     dirname, fname = os.path.split(fpath)
#     df.to_sql(os.path.splitext(fname)[0], con, index=None, if_exists='replace')
# exit(1)

# Genomic window to visualise: 2 kb starting at chr9:127,047,500.
chromosome = 'chr9'
start = 127047500
end = start + 2000

titles = ['active regions', 'tiled regions', 'high-resolution driver elements',
          'SNPs with allele-specific activity (raw p-value < 0.05)']

# One subplot per supplementary table (MOESM4 .. MOESM7).
n = 4
_, ax = plt.subplots(n)
for i in range(n):
    tname = '41467_2018_7746_MOESM{}_ESM'.format(i+4)
    # Rows whose [start, end) interval overlaps the window.
    df = pd.read_sql("SELECT * FROM '{}' WHERE chromosome='{}' AND start<{} AND end>{}".format(tname, chromosome, end, start), con)
    for idx in df.index:
        # 0/1 mask over the window marking this record's interval.
        # NOTE(review): the mask is rebuilt and plotted per row, so each row
        # becomes its own curve on the subplot — confirm that is intended.
        buffer = np.zeros(end - start)
        sidx = df.loc[idx, 'start'] - start
        eidx = df.loc[idx, 'end'] - df.loc[idx, 'start'] + sidx
        buffer[sidx:eidx] = 1
        xaxis = np.arange(buffer.shape[0])
        ax[i].plot(xaxis, buffer)
    # ax[i].set_title(titles[i])
    # Tick every 400 bp, labelled with the genomic coordinate.
    ax[i].set_xticks(xaxis[::400])
    ax[i].set_xticklabels(['{}:{}'.format(chromosome, x) for x in range(start, end, 400)], fontsize=6)
plt.show()
997,628
dea171d6ea141f6f3628d00afcb4d134ad95b3ad
# Jake Burton
# 40278490
# SET11508 Emergent Computing for Optimisation.

# Import relevant libraries.
import pandas as pd
import numpy as np
from deap import algorithms
from deap import base
from deap import creator
from deap import tools
# import matplotlib.pyplot as plt
# from scipy.stats import shapiro, ttest_ind, mannwhitneyu

# Read the player data (one row per player: Position, Points, Cost, ...).
data = pd.read_csv("clean-data.csv").reset_index(drop=True)

# Initialise variables used throughout the algorithm.
num_players = len(data.index)
points = data['Points']
costs = data['Cost']
MAX_COST = 100

# 0/1 indicator array per position, built with one vectorised comparison
# instead of a Python loop over every player.
gk = np.asarray(data['Position'] == 'GK', dtype=float)
defe = np.asarray(data['Position'] == 'DEF', dtype=float)
mid = np.asarray(data['Position'] == 'MID', dtype=float)
stri = np.asarray(data['Position'] == 'STR', dtype=float)

# Sanity check that the input file is being read correctly.
TOTAL_COST = costs.sum()
TOTAL_POINTS = points.sum()
print("Total Cost: " + str(TOTAL_COST))
print("Total Points: " + str(TOTAL_POINTS))


# All eval_* helpers wrap their result in bool(): callers compare the
# combined verdict with `is False`, which a numpy bool_ would never satisfy.

def eval_num_players(individual):
    """True iff the team selects exactly 11 players."""
    return bool(np.sum(individual) == 11)


def eval_goalie(individual):
    """True iff the team has exactly one goalkeeper."""
    return bool(np.sum(np.multiply(gk, individual)) == 1)


def eval_def(individual):
    """True iff the team has between 3 and 5 defenders (inclusive)."""
    def_check = np.sum(np.multiply(defe, individual))
    return bool(3 <= def_check <= 5)


def eval_mid(individual):
    """True iff the team has between 3 and 5 midfielders (inclusive)."""
    mid_check = np.sum(np.multiply(mid, individual))
    return bool(3 <= mid_check <= 5)


def eval_stri(individual):
    """True iff the team has between 1 and 3 strikers (inclusive)."""
    stri_check = np.sum(np.multiply(stri, individual))
    return bool(1 <= stri_check <= 3)


def eval_team(ind):
    """True iff the team satisfies every squad constraint above."""
    return (eval_num_players(ind) and eval_goalie(ind) and eval_def(ind)
            and eval_mid(ind) and eval_stri(ind))


# Evaluation Method, ensures that the team (individual) is valid.
def evalKnapsack(individual):
    """DEAP fitness function: total points of the selected players.

    Returns a 1-tuple (DEAP convention). Teams that exceed the budget or
    break any squad constraint score 0, steering the search toward
    feasible teams.
    """
    cost = 0.0
    value = 0.0
    for item in range(num_players):
        if (individual[item] == 1):
            cost += data['Cost'][item]
            value += data['Points'][item]
    ev_te = eval_team(individual)
    # `is False` requires eval_team to return a genuine Python bool.
    if(cost > MAX_COST or ev_te is False):
        return 0,
    return value,


# Final Evaluation Method provided by Emma Hart
def check_constraints(individual):
    """Print a constraint report for a finished individual.

    Returns (number of broken constraints, total points).
    """
    broken_constraints = 0
    # exactly 11 players
    c1 = np.sum(individual)
    if c1 != 11:
        broken_constraints += 1
        print("total players is %s " % (c1))
    # need cost <= 100
    c2 = np.sum(np.multiply(costs, individual))
    if c2 > 100:
        broken_constraints += 1
        print("cost is %s " % (c2))
    # need exactly 1 GK
    c3 = np.sum(np.multiply(gk, individual))
    if c3 != 1:
        broken_constraints += 1
        print("goalies is %s " % (c3))
    # need 3-5 DEF
    c4 = np.sum(np.multiply(defe, individual))
    if c4 > 5 or c4 < 3:
        broken_constraints += 1
        print("DEFE is %s " % (c4))
    # need 3-5 MID
    c5 = np.sum(np.multiply(mid, individual))
    if c5 > 5 or c5 < 3:
        broken_constraints += 1
        print("MID is %s " % (c5))
    # need 1-3 STR
    c6 = np.sum(np.multiply(stri, individual))
    if c6 > 3 or c6 < 1:
        broken_constraints += 1
        print("STR is %s " % (c6))

    # get indices of players selected
    selectedPlayers = [idx for idx, element in enumerate(individual) if element == 1]
    totalpoints = np.sum(np.multiply(points, individual))
    print("total broken constraints: %s" % (broken_constraints))
    print("total points: %s" % (totalpoints))
    print("total cost is %s" % (c2))
    print("selected players are %s" % (selectedPlayers))
    return broken_constraints, totalpoints


# Initialises a valid team, always returning a valid individual.
# Takes in a blank Individual and the total number of players.
# Gets the values for the player positions by creating an array of random
# numbers between 0 and 1 and then multiplying all the values by the number of
# players. The calling of the eval_team method and the while loop ensures that
# the returned team does not have duplicates of the same player.
def initialise_team(incc, num):
    # `incc` is the Individual class (creator.Individual); `num` is the total
    # number of players. Keep drawing random 11-index teams until one passes
    # every squad constraint.
    # NOTE(review): np.random.rand(11)*num can repeat an index, which selects
    # fewer than 11 players — eval_team rejects those draws, so the loop only
    # ever returns a valid team.
    while(1):
        int_team = (np.random.rand(11)*num)
        ind = incc(np.zeros(num))
        for index in int_team:
            ind[int(index)] = 1
        ev_te = eval_team(ind)
        if(ev_te is False):
            continue
        # print(ind)
        return ind


# GA hyper-parameters (overridden below before the final run).
MUTPB = 0.01          # per-gene mutation probability
CXPB = 0.6            # crossover probability
POPSIZE = 500
NGEN = 50
TNSIZE = 2            # tournament size
OPER = tools.cxTwoPoint

# Adds all of the relevant DEAP functions.
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", list, fitness=creator.FitnessMax)

toolbox = base.Toolbox()
toolbox.register("individual", initialise_team, creator.Individual, num_players)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)


# Main function
def main():
    # All of the operators for the EA used. Registered here (not at import)
    # so reruns pick up the current MUTPB/TNSIZE/OPER values.
    toolbox.register("evaluate", evalKnapsack)
    toolbox.register("mate", OPER)
    toolbox.register("mutate", tools.mutFlipBit, indpb=MUTPB)
    toolbox.register("select", tools.selTournament, tournsize=TNSIZE)

    pop = toolbox.population(n=POPSIZE)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)

    pop, log = algorithms.eaSimple(pop, toolbox, cxpb=CXPB, mutpb=MUTPB,
                                   ngen=NGEN, stats=stats, halloffame=hof,
                                   verbose=True)
    return pop, log, hof


# FINAL SOLUTION CHECKER
MUTPB = 0.01
CXPB = 0.6
POPSIZE = 5000
NGEN = 100
TNSIZE = 2
OPER = tools.cxTwoPoint

pop, log, hof = main()
best = hof[0].fitness.values[0]
# NOTE(review): `max` shadows the builtin of the same name from here on.
max = log.select("max")
# Find the first generation where the best fitness appeared.
for i in range(200):
    fit = max[i]
    if fit == best:
        break

print(check_constraints(hof[0]))
print("max fitness found is %s at generation %s" % (best, i))

# TESTING AND EXPERIMENTATION
# (The triple-quoted blocks below are disabled experiment scripts, kept
# verbatim for reference.)
"""
# Population Size Testing
columns = ['popsize', 'fitness', 'genMaxFound']
df = pd.DataFrame(columns=columns)
for POPSIZE in range(500, 2001, 500):
    for reps in range(10):
        pop, log, hof = main()
        best = hof[0].fitness.values[0]
        max = log.select("max")
        for gen in range(NGEN):
            if max[gen] == best:
                break
        df = df.append({'popsize': POPSIZE, 'fitness': best,
                        'genMaxFound': gen}, ignore_index=True)

# plot the boxplot of fitness per population size
boxplot = df.boxplot(column=['fitness'], by=['popsize'])
plt.savefig('fitvpop.png')

# plot genMaxFound per population size
boxplot = df.boxplot(column=['genMaxFound'], by=['popsize'])
plt.savefig('genvpop.png')

p500 = df.fitness[df.popsize == 500]
p2000 = df.fitness[df.popsize == 2000]

stat, p1 = shapiro(p500)
stat, p2 = shapiro(p2000)
if(p1 > 0.05 and p2 > 0.05):
    print("Both Gaussian")
    stat, p = ttest_ind(p500, p2000)
    if p > 0.05:
        print("Probably from same distribution")
    else:
        print("Probably different distribution")
else:
    print("One or more are not Gaussian")
    stat, p = mannwhitneyu(p500, p2000)
    if p > 0.05:
        print("Probably from same distribution")
    else:
        print("Probably different distribution")
"""

"""
# Tournament Size Testing
columns = ['tournsize', 'fitness', 'genMaxFound']
df = pd.DataFrame(columns=columns)
for TNSIZE in range(2, 11, 4):
    for reps in range(10):
        pop, log, hof = main()
        best = hof[0].fitness.values[0]
        max = log.select("max")
        for gen in range(NGEN):
            if max[gen] == best:
                break
        df = df.append({'tournsize': TNSIZE, 'fitness': best,
                        'genMaxFound': gen}, ignore_index=True)

# plot the boxplot of fitness per population size
boxplot = df.boxplot(column=['fitness'], by=['tournsize'])
plt.savefig('fitvtourn.png')

# plot genMaxFound per population size
boxplot = df.boxplot(column=['genMaxFound'], by=['tournsize'])
plt.savefig('genvtourn.png')

p2t = df.fitness[df.tournsize == 2]
p10t = df.fitness[df.tournsize == 10]

stat, p1 = shapiro(p2t)
stat, p2 = shapiro(p10t)
if(p1 > 0.05 and p2 > 0.05):
    print("Both Gaussian")
    stat, p = ttest_ind(p2t, p10t)
    if p > 0.05:
        print("Probably from same distribution")
    else:
        print("Probably different distribution")
else:
    print("One or more are not Gaussian")
    stat, p = mannwhitneyu(p2t, p10t)
    if p > 0.05:
        print("Probably from same distribution")
    else:
        print("Probably different distribution")
"""

"""
# Mutation probability testing
columns = ['mutpb', 'fitness', 'genMaxFound']
df = pd.DataFrame(columns=columns)
for MUTPBts in range(1, 6, 1):
    MUTPB = MUTPBts/100
    for reps in range(10):
        pop, log, hof = main()
        best = hof[0].fitness.values[0]
        max = log.select("max")
        for gen in range(NGEN):
            if max[gen] == best:
                break
        df = df.append({'mutpb': MUTPB, 'fitness': best,
                        'genMaxFound': gen}, ignore_index=True)

boxplot = df.boxplot(column=['fitness'], by=['mutpb'])
plt.savefig('fitvmut.png')

boxplot = df.boxplot(column=['genMaxFound'], by=['mutpb'])
plt.savefig('genvmut.png')

p001 = df.fitness[df.mutpb == 0.01]
p005 = df.fitness[df.mutpb == 0.05]

stat, p1 = shapiro(p001)
stat, p2 = shapiro(p005)
if(p1 > 0.05 and p2 > 0.05):
    print("Both Gaussian")
    stat, p = ttest_ind(p001, p005)
    if p > 0.05:
        print("Probably from same distribution")
    else:
        print("Probably different distribution")
else:
    print("One or more are not Gaussian")
    stat, p = mannwhitneyu(p001, p005)
    if p > 0.05:
        print("Probably from same distribution")
    else:
        print("Probably different distribution")
"""

"""
# NGEN Testing
columns = ['ngen', 'fitness', 'genMaxFound']
df = pd.DataFrame(columns=columns)
for NGEN in range(10, 51, 10):
    for reps in range(10):
        pop, log, hof = main()
        best = hof[0].fitness.values[0]
        max = log.select("max")
        for gen in range(NGEN):
            if max[gen] == best:
                break
        df = df.append({'ngen': NGEN, 'fitness': best,
                        'genMaxFound': gen}, ignore_index=True)

boxplot = df.boxplot(column=['fitness'], by=['ngen'])
plt.savefig('fitvsngen.png')

p10 = df.fitness[df.ngen == 10]
p50 = df.fitness[df.ngen == 50]

stat, p1 = shapiro(p10)
stat, p2 = shapiro(p50)
if(p1 > 0.05 and p2 > 0.05):
    print("Both Gaussian")
    stat, p = ttest_ind(p10, p50)
    if p > 0.05:
        print("Probably from same distribution")
    else:
        print("Probably different distribution")
else:
    print("One or more are not Gaussian")
    stat, p = mannwhitneyu(p10, p50)
    if p > 0.05:
        print("Probably from same distribution")
    else:
        print("Probably different distribution")
"""

"""
# Mate operator testing
types = [tools.cxOnePoint, tools.cxTwoPoint]
columns = ['mate', 'fitness', 'genMaxFound']
df = pd.DataFrame(columns=columns)
for index in range(len(types)):
    OPER = types[index]
    for reps in range(10):
        pop, log, hof = main()
        best = hof[0].fitness.values[0]
        max = log.select("max")
        for gen in range(NGEN):
            if max[gen] == best:
                break
        df = df.append({'mate': (index+1), 'fitness': best,
                        'genMaxFound': gen}, ignore_index=True)

boxplot = df.boxplot(column=['fitness'], by=['mate'])
plt.savefig('fitvmate.png')

boxplot = df.boxplot(column=['genMaxFound'], by=['mate'])
plt.savefig('genvmate.png')

p1p = df.fitness[df.mate == 1]
p2p = df.fitness[df.mate == 2]

stat, p1 = shapiro(p1p)
stat, p2 = shapiro(p2p)
if(p1 > 0.05 and p2 > 0.05):
    print("Both Gaussian")
    stat, p = ttest_ind(p1p, p2p)
    if p > 0.05:
        print("Probably from same distribution")
    else:
        print("Probably different distribution")
else:
    print("One or more are not Gaussian")
    stat, p = mannwhitneyu(p1p, p2p)
    if p > 0.05:
        print("Probably from same distribution")
    else:
        print("Probably different distribution")
"""
997,629
267d5ed158875ec23d720946d4927a642357c28a
import math
import wx


class SuShu(wx.Frame):
    """wx frame that collects the primes ("sushu") up to a user-entered bound.

    NOTE(review): the widgets created in __init__ are never laid out or shown;
    GetSuShu reads its bound from stdin (raw_input) rather than the TextCtrl.
    This is Python 2 code (raw_input, print statement).
    """

    def __init__(self):
        wx.Frame.__init__(self, None, -1, "SuShu Frame")
        self.shu = []    # candidate numbers 0..n (filled in GetSuShu)
        self.sushu = []  # primes found so far
        self.panel = wx.Panel(self)
        # Local widgets; never added to the frame's sizer, so effectively unused.
        size = wx.FlexGridSizer(0, 2, 10, 10)
        ShuStaticText = wx.StaticText(self.panel, -1, "Shu")
        ShuCtrlText = wx.TextCtrl(self.panel, -1,)

    def GetSuShu(self):
        """Trial-division prime scan from n down to 1; prints the primes found.

        The inner loop uses j as the trial divisor; j = 0 doubles as the
        "stop testing this i" sentinel.
        """
        self.shu = range(int(raw_input('enter one number')) + 1)
        i = len(self.shu) - 1
        while i != 0:
            j = 2  # smallest trial divisor
            while j != 0 :
                if i % j == 0:
                    # Divisible: prime only in the special case i == 2.
                    if i == 2:
                        self.sushu.append(2)
                    j = 0
                elif j > math.sqrt(i):
                    # No divisor found up to sqrt(i) -> i is prime.
                    self.sushu.append(i)
                    j = 0
                else:
                    j += 1
            i -= 1
        print self.sushu


shu = SuShu()
shu.GetSuShu()
997,630
643a2187ff6fb6ba22d62dbadc43b6b1d1b8a7b4
#! /usr/bin/env python from pylab import plot, show, savefig, grid, gca, legend, figure, title, \ xlabel, ylabel, ylim import silver_uniformpfem50 import silver_uniformpfem4 import silver_pfem import silver_hpfem def do_plot(x, y, n, l, color="k", label=""): n_r = n - l - 1 if n_r == 0: plot(x, y, color + "-", label=label) else: plot(x, y, color + "-") grid(True) ax = gca() xlabel("DOFs") ylabel("$E_{num}-E$ [Ha]") ax.set_yscale("log") ylim(ymin=1e-8) title("Eigenvalues (l=%d, Z=47)" % l) legend() n_eig = 50 l = 0 print "Saving to silver_l_0.png" for i in range(n_eig): n = l+1+i do_plot(silver_uniformpfem50.R_x[l], silver_uniformpfem50.R_y[n, l], n, l, "y", "uniform $p$-FEM, 50 elms (L)") for i in range(n_eig): n = l+1+i do_plot(silver_uniformpfem4.R_x[l], silver_uniformpfem4.R_y[n, l], n, l, "k", "uniform $p$-FEM, 4 elms (L)") for i in range(n_eig): n = l+1+i do_plot(silver_pfem.R_x[l], silver_pfem.R_y[n, l], n, l, "b", "$p$-FEM (L)") for i in range(n_eig): n = l+1+i do_plot(silver_hpfem.R_x[l], silver_hpfem.R_y[n, l], n, l, "r", "$hp$-FEM (L)") savefig("silver_l_0.png") savefig("silver_l_0.pdf")
997,631
3d2373630ca6a384eb6e4fa2132bf0e141d5216a
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 18 14:18:27 2019

@author: Guest Group
"""
from ._free_property import metaProperty, FreeProperty

__all__ = ('PropertyFactory',)


# %% Property Factory

def PropertyFactory(fget=None, fset=None, clsname=None, doc=None,
                    units=None, slots=None):
    """Create a FreeProperty subclass with getter and setter functions.

    Works like the built-in `property`, but returns a *class*: its
    instances behave like their dynamically computed value.

    Parameters
    ----------
    fget : function, optional
        Should return the value of instances. If not given, a decorator
        expecting `fget` will be returned.
    fset : function, optional
        Should set the value of instances.
    clsname : str, optional
        Name of the class. Defaults to the function name of `fget`.
    doc : str, optional
        Docstring of the class. Defaults to the docstring of `fget`.
    units : str, optional
        Units of measure.
    slots : tuple[str], optional
        Slots for the class; defaults to ('name', 'data').

    Examples
    --------
    Create a Weight class that computes weight from density and volume
    (it is most convenient to use PropertyFactory as a decorator):

    >>> @PropertyFactory(units='kg')
    ... def Weight(self):
    ...     '''Weight (kg) based on volume (m^3).'''
    ...     data = self.data
    ...     return data['rho'] * data['vol']
    >>>
    >>> @Weight.setter
    ... def Weight(self, weight):
    ...     data = self.data
    ...     data['vol'] = weight / data['rho']

    Instances wrap a name and a data dict, and behave like their value:

    >>> water_data = {'rho': 1000, 'vol': 3}
    >>> weight_water = Weight('Water', water_data)
    >>> weight_water
    <Water: 3000 kg>
    >>> weight_water + 30
    3030
    >>> weight_water.value = 4000   # setter updates the underlying volume
    >>> water_data
    {'rho': 1000, 'vol': 4.0}

    In-place magic methods also change the property value:

    >>> weight_water -= 1000
    >>> weight_water
    <Water: 3000 kg>
    """
    if not fget:
        # Called without a getter: act as a decorator factory that will
        # receive fget later.
        return lambda fget: PropertyFactory(fget, fset, clsname, doc, units, slots)

    # Defaults
    if clsname is None: clsname = fget.__name__
    if doc is None: doc = fget.__doc__
    definitions = {'__doc__': doc,
                   '__slots__': slots or ('name', 'data'),
                   '__module__': fget.__module__,
                   '_units': units,
                   'value': property(fget, fset)}
    # metaProperty builds the FreeProperty subclass from these definitions.
    return metaProperty(clsname, (FreeProperty,), definitions)
997,632
5bd4def52345c5d7a97d7c6d35e81834b0ea4a90
#!/usr/bin/python2.7 import codecs import re import json import os import argparse from snifferCommons import * def main(): # input: allTopicTweets = [(tweet 1 id, tweet 1 text, tweeter user id, mentioned screen_names, hashtags, tweet class), ...], # histories = [(user 1 id, [tweet 1 text, tweet 2 text, ...]), ...], # users = [(user 1 id, user 1 screen_name, user alignment, user class)] parser = argparse.ArgumentParser() parser.add_argument('baseFileName', help="enter base name without the extension") parser.add_argument('lastPartialNumber', help="enter the last partial file with any leading zeros") args = parser.parse_args() baseName = args.baseFileName hashtagJson = generateHashtagDictFileName(baseName) with codecs.open(hashtagJson, "r", "utf-8") as file: classifiedHashtags = json.load(file) # for now ignore case of whole file, best is to just update format from start so this script is unnecessary endingPartialNumber = int(args.lastPartialNumber) ordinal = "{" + ":0{}d".format(len(args.lastPartialNumber)) + "}" allTopicTweets = list() histories = dict() users = dict() currentNum = 1 while currentNum <= endingPartialNumber: print "Next ordinal:", ordinal.format(currentNum) topicJson, historyJson, userJson = generateOldIntermediateFileNames(baseName + ordinal.format(currentNum)) if os.path.exists(topicJson) and os.path.exists(topicJson) and os.path.exists(topicJson): with codecs.open(topicJson, "r", "utf-8") as file: allTopicTweets.extend(json.load(file)) with codecs.open(historyJson, "r", "utf-8") as file: histories.update(json.load(file)) with codecs.open(userJson, "r", "utf-8") as file: users.update(json.load(file)) else: print "Error: full set of files for {} do not exist in current directory".format(baseName + ordinal.format(currentNum)) exit() currentNum += 1 # read in dict of { hashtag: class, hashtag: class, ... 
} # print the statistics for the dict: # - # of tags # - distribution over ANT, UNK, PRO # - # of tags present in both dict and tweets # - # of tags present in only dict, # of tags present in only tweets # do need to look at all users at once? only when using to vote for tweets, so search through each file then # deduping was done jankily, make sure deduping is included in flow of scripts -> rawSplit.py # do need to look at all user histories at once? search through each file by user id when necesary # reformat histories to be dict of userId => [texts] # users to be dict of userId => [screen_name, alignment, class] # trumpTweets dict of userId => { tweetId => text, tweetId => text } # do need to look at all trump tweets at once? loop through files when voting via hashtags, when voting for users, when auxiliary voting via hashtagDict = { hashtag: classifiedHashtags[hashtag]["class"] for hashtag in classifiedHashtags } allTopicTweets = [ primaryAssignment(tweet, hashtagDict) for tweet in allTopicTweets ] printClassDistribution(allTopicTweets, "trump tweets") usersList = list() for user in users: users[user].update({"userId": user}) usersList.append(users[user]) tweetDict = { int(tweet["userId"]): tweet for tweet in allTopicTweets } usersList = [ userClassAssignment(user, tweetDict) for user in usersList ] printClassDistribution(usersList, "users") screenNameDict = { users[user]["screenName"]: users[user]["class"] for user in users } # allTopicTweets = [ auxiliaryAssignment(tweetDict[tweet], screenNameDict) for tweet in tweetDict ] # printClassDistribution(allTopicTweets, "trump tweets") # usersList = [ userClassAssignment(user, tweetDict) for user in usersList ] # printClassDistribution(usersList, "users") categorizedHistories = [] for user in usersList: categorizedHistories.append({ "class": user["class"], "tweets": histories[user["userId"]]["tweetTexts"] }) # only user class and nontrump history with codecs.open(generateOutputFileName(baseName), "w+", "utf-8") as 
file: json.dump(categorizedHistories, file, indent=4, separators=(',', ': ')) # output: # categorizedHistories = [(user 1 class, [tweet 1 text, tweet 2 text, ...]), (user 2 class, [tweet 1 text, ...]), ...] # if categorized, change user's alignment distribution: (+1, -1, +0) for anti, (+0, -1, +1) for pro def primaryAssignment(tweet, hashtagDict): # hashtagDict = {hashtag: hashtag class, ...} tweet = vote(tweet, "hashtags", hashtagDict) return tweet def auxiliaryAssignment(tweet, screennames): tweet = vote(tweet, "mentionedScreenNames", screennames) return tweet def vote(item, voterKey, voterDict): if "alignment" not in item: # should have added to all dicts and didn't item["alignment"] = [0, 0, 0] for voter in item[voterKey]: voter = voter.lower() if voter in voterDict: if voterDict[voter] == PRO: item["alignment"][RIGHT] += 1 elif voterDict[voter] == ANT: item["alignment"][LEFT] += 1 else: item["alignment"][NEUT] += 1 votes = item["alignment"] if votes[RIGHT] > votes[LEFT]: item["class"] = PRO elif votes[RIGHT] < votes[LEFT]: item["class"] = ANT else: item["class"] = UNK return item # DP: Allow unassigned to be an end category or prioritize pro/anti? For now, let it be a category def userClassAssignment(user, tweetDict): # allTopicTweets = [(tweet 1 id, tweet 1 text, tweeter user id, mentioned screen_names, hashtags, tweet class), ...] if "alignment" in user: votes = user["alignment"] else: # should have added to all dicts and didn't votes = [0, 0, 0] userId = int(user["userId"]) if userId in tweetDict: # there IS possibility that user cannot be found due to differences in querying times between steps! 
if tweetDict[userId]["class"] == PRO: votes[RIGHT] += 1 votes[NEUT] -= 1 elif tweetDict[userId]["class"] == ANT: votes[LEFT] += 1 votes[NEUT] -= 1 if votes[RIGHT] > votes[LEFT]: user["class"] = PRO elif votes[RIGHT] < votes[LEFT]: user["class"] = ANT else: user["class"] = UNK user["alignment"] = votes return user def printClassDistribution(items, description): distribution = [0, 0, 0] for item in items: if item is not None: if item["class"] == ANT: distribution[LEFT] += 1 elif item["class"] == PRO: distribution[RIGHT] += 1 else: distribution[NEUT] += 1 print "Class distribution of {}:".format(description) print distribution if __name__ == '__main__': main()
997,633
e278d344f4a98e6fbcea1031d91e458b781a3855
def find_strict_peaks(values):
    """Return the elements that are strictly greater than every element
    before them and strictly smaller than every element after them.

    Runs in O(n): one suffix-min precomputation plus one prefix-max scan.
    Fix over the original script: the left sentinel is -inf rather than 0,
    so non-positive leading values are handled correctly.
    """
    n = len(values)
    # suffix_min[i] = min of values[i+1:]; +inf past the right edge
    suffix_min = [0] * n
    running_min = float('inf')
    for i in range(n - 1, -1, -1):
        suffix_min[i] = running_min
        running_min = min(running_min, values[i])
    result = []
    prefix_max = float('-inf')  # max of values[:i]
    for i, v in enumerate(values):
        if prefix_max < v < suffix_min[i]:
            result.append(v)
        prefix_max = max(prefix_max, v)
    return result


def main():
    """Read n, then n integers, from stdin; print how many elements are
    strict prefix-maxima and suffix-minima, then those elements."""
    input()  # first line is n; the actual length comes from the list itself
    s = list(map(int, input().split()))
    picked = find_strict_peaks(s)
    print(len(picked))
    print(' '.join(map(str, picked)))


if __name__ == '__main__':
    main()
997,634
4177f6fca2b6a01300696852dc81ada0890e0909
#!/usr/bin/python3
"""A simple I/O script."""


def read_file(filename=""):
    """Dump the entire content of ``filename`` (UTF-8 text) to stdout.

    Nothing extra is appended after the file's content (print end='').
    """
    with open(filename, 'r', encoding='utf-8') as stream:
        content = stream.read()
        print(content, end='')
997,635
d1faacaf71f3766058e6c5739cb92d7c49ff70ce
"""A simple Naive-Bayes classifier designed to train on any number of features, and then classify documents into one of two mutually exclusive categories. Implements the algorithm described for spam filtering here: http://en.wikipedia.org/wiki/Naive_Bayes_classifier#Document_classification """ from collections import defaultdict from math import log, exp class NaiveBayesClassifier(object): def __init__(self): self.label_feature_lookup = defaultdict(lambda: defaultdict(int)) self.label_total_feature_counts = defaultdict(int) self.label_total_document_counts = defaultdict(int) def train(self, labeled_features): """Accepts a list of labeled features -- tuples of format (label, feature_vector), and learns feature weights""" for label, feature_vec in labeled_features: self.label_total_document_counts[label] += 1 for feature in feature_vec: self.label_feature_lookup[label][feature] += 1 self.label_total_feature_counts[label] += 1 self.all_labels = self.label_total_document_counts.keys() def classify(self, feature_vec, label1, label2): '''This function ''' total_weight = 0 for feature in feature_vec: p_feature_given_label1 = ((self.label_feature_lookup[label1][feature] + 1.0)/ (self.label_total_feature_counts[label1] + 1.0)) p_feature_given_label2 = ((self.label_feature_lookup[label2][feature] + 1.0)/ (self.label_total_feature_counts[label2] + 1.0)) total_weight += log(p_feature_given_label1/p_feature_given_label2) prior_factor = log((self.label_total_document_counts[label1] + 1.0)/ (self.label_total_document_counts[label2] + 1.0)) if prior_factor + total_weight > 0: return label1 else: return label2
997,636
2b1b9fa5db1a864e0353fced311113efb30982e4
''' author: faraz mazhar descri: A PySpark script. MapPartition on DataFrame. DataFrame structure: +-------+----------+ | years | salary | +-------+----------+ | 1.1 | 39343.00 | | 1.3 | 46205.00 | | 1.5 | 37731.00 | | ... | ... | +-------+----------+ Answer on 'salary_mini.csv': +-----+--------+ |years| salary| +-----+--------+ | 5.9|166804.0| | 14.5|275573.0| +-----+--------+ ''' # Disabling pylint false positive on PySpark import. # pylint: disable=E0401 from pyspark.sql import SparkSession, Row import pyspark.sql.functions as f import pandas as pd # Boiler plate code. spark = SparkSession.builder \ .config("spark.sql.warehouse.dir", "file:///C:/temp") \ .appName("JoinMovieRatingsApp") \ .getOrCreate() spark.sparkContext.setLogLevel('ERROR') # An example function for mapPartition. def func(iterator): yield sum(iterator) # Partition logic for df. salarydf = spark.read.csv('salary_mini.csv', header=True) years = spark.sparkContext.parallelize( salarydf.rdd.map(lambda x: float(x['years'])).collect(), 2 ) \ .mapPartitions(func) \ .collect() salary = spark.sparkContext.parallelize( salarydf.rdd.map(lambda x: float(x['salary'])).collect(), 2 ) \ .mapPartitions(func) \ .collect() df = spark.createDataFrame(list(zip(years, salary)), schema=['years', 'salary']) df.show() spark.stop()
997,637
59328e2e084846d57151f208bca26f9c3f472fb7
from django.contrib.sitemaps import Sitemap
from plumbaker.apps.press.models import Release
from plumbaker.apps.inventory.models import Good
import datetime


class NewsSitemap(Sitemap):
    """Sitemap of live press releases, linked under /press/<slug>."""
    changefreq = 'monthly'  # crawler hint only
    priority = 0.5

    def items(self):
        # Only releases exposed by the custom `live` manager are indexed.
        return Release.live.all()

    def lastmod(self, obj):
        return obj.pub_date

    def location(self, obj):
        return "/press/%s" % obj.slug


class GoodsSitemap(Sitemap):
    """Sitemap of all baked goods, linked under /baked-goods/<slug>."""
    changefreq = 'monthly'
    priority = 0.5

    def items(self):
        return Good.objects.all()

    def location(self, obj):
        return "/baked-goods/%s" % obj.slug
997,638
f28ac7d47bd2987c813e575347525fb196d6a425
# Inserts a single event into the authenticated user's primary Google
# Calendar.  (A large block of commented-out quickstart/listing code was
# removed; see VCS history if the event-listing variant is needed again.)
from pprint import pprint
import datetime
from Google import Create_Service

# OAuth client configuration for the Google Calendar API.
CLIENT_SECRET_FILE = 'credentials.json'
API_NAME = 'calendar'
API_VERSION = 'v3'
SCOPES = ['https://www.googleapis.com/auth/calendar']

service = Create_Service(CLIENT_SECRET_FILE, API_NAME, API_VERSION, SCOPES)

now = datetime.datetime.utcnow().isoformat() + 'Z'  # 'Z' indicates UTC time

# Event window; the -05:00 offset is US Central *daylight* time, matching
# the 'America/Chicago' timeZone below.
startDate = '2020-10-24T09:00:00-05:00'
endDate = '2020-10-24T10:00:00-05:00'

summary = "Math Class"

event = {
    'summary': summary,
    'start': {
        'dateTime': startDate,
        'timeZone': 'America/Chicago',
    },
    'end': {
        'dateTime': endDate,
        'timeZone': 'America/Chicago',
    }
}

event = service.events().insert(calendarId='primary', body=event).execute()
# Restored from the commented-out version, which had an unbalanced paren.
print('Event created: %s' % event.get('htmlLink'))
997,639
46862fa411015afaa2cabc40b863503edb0a38ab
"""Message-broker actor entry points for the formal-offers service.

Each actor decodes its wire-format arguments (ISO-8601 timestamps,
base64url-encoded secrets) and delegates to the `procedures` module.
"""
from typing import Optional, List
from base64 import urlsafe_b64decode
import iso8601
from .extensions import broker, APP_QUEUE_NAME
from . import procedures


@broker.actor(queue_name=APP_QUEUE_NAME)
def create_formal_offer(
        payee_creditor_id: int,
        offer_announcement_id: int,
        debtor_ids: List[int],
        debtor_amounts: List[int],
        valid_until_ts: str,
        description: Optional[dict] = None,
        reciprocal_payment_debtor_id: Optional[int] = None,
        reciprocal_payment_amount: int = 0) -> None:
    """Creates a new formal offer to supply some goods or services.

    The `payee_creditor_id` offers to deliver the goods or services
    depicted in `description` if a payment is made to his account via
    one of the debtors in `debtor_ids` (with the corresponding amount
    in `debtor_amounts`). The offer will be valid until
    `valid_until_ts`. `offer_announcement_id` is a number generated by
    the payee (who creates the offer), and must be different for each
    offer announced by a given payee.

    If `reciprocal_payment_debtor_id` is not `None`, an automated
    reciprocal transfer (for the `reciprocal_payment_amount`, via this
    debtor) will be made from the payee to the payer when the offer is
    paid. This allows formal offers to be used as a form of currency
    swapping mechanism.

    Before sending a message to this actor, the sender must create a
    Formal Offer (FO) database record, with a primary key of
    `(payee_creditor_id, offer_announcement_id)`, and status
    "initiated". This record will be used to act properly on
    `CreatedFromalOfferSignal`, `SuccessfulPaymentSignal`, and
    `CanceledFormalOfferSignal` events.

    CreatedFromalOfferSignal
    ------------------------
    If a `CreatedFromalOfferSignal` is received for an "initiated" FO
    record, the status of the FO record must be set to "created", and
    the received values for `offer_id` and `offer_secret` -- recorded.

    If a `CreatedFromalOfferSignal` is received for an already
    "created", "paid", or "canceled" FO record, the corresponding
    values of `offer_id` must be compared. If they are the same, no
    action should be taken. If they differ, the newly created offer
    must be immediately canceled (by sending a message to the
    `cancel_formal_offer` actor).

    If a `CreatedFromalOfferSignal` is received, but a corresponding FO
    record is not found, the newly created offer must be immediately
    canceled.

    SuccessfulPaymentSignal
    -----------------------
    If a `SuccessfulPaymentSignal` is received for a "created" FO
    record, the status of the FO record should be set to "paid".

    If a `SuccessfulPaymentSignal` is received in any other case, no
    action should be taken.

    CanceledFormalOfferSignal
    -------------------------
    If a `CanceledFormalOfferSignal` is received for a "created" FO
    record, the status of the FO record must be set to "canceled".

    If a `CanceledFormalOfferSignal` is received in any other case, no
    action should be taken.

    IMPORTANT NOTES:

    1. "initiated" FO records must not be deleted.

    2. "created" FO records must not be deleted, Instead, they could be
       requested to be canceled (by sending a message to the
       `cancel_formal_offer` actor).

    3. "paid" or "canceled" FO records can be deleted whenever
       considered appropriate.
    """
    procedures.create_formal_offer(
        payee_creditor_id,
        offer_announcement_id,
        debtor_ids,
        debtor_amounts,
        iso8601.parse_date(valid_until_ts),  # timestamps arrive as ISO-8601 strings
        description,
        reciprocal_payment_debtor_id,
        reciprocal_payment_amount,
    )


@broker.actor(queue_name=APP_QUEUE_NAME)
def cancel_formal_offer(
        payee_creditor_id: int,
        offer_id: int,
        offer_secret: str) -> None:
    """Requests the cancellation of a formal offer.

    If the offer has been successfully canceled, a
    `CanceledFormalOfferSignal` will be sent. If the offer has received
    a payment in the meantime, a `SuccessfulPaymentSignal` will be sent
    instead.

    Nothing happens if an offer with the given `payee_creditor_id`,
    `offer_id`, and `offer_secret` does not exist.
    """
    procedures.cancel_formal_offer(
        payee_creditor_id,
        offer_id,
        urlsafe_b64decode(offer_secret),  # secrets travel base64url-encoded
    )


@broker.actor(queue_name=APP_QUEUE_NAME)
def make_payment_order(
        payee_creditor_id: int,
        offer_id: int,
        offer_secret: str,
        payer_creditor_id: int,
        payer_payment_order_seqnum: int,
        debtor_id: int,
        amount: int,
        proof_secret: str,
        payer_note: dict = {}) -> None:
    """Tries to make a payment to a formal offer.

    If the payment is successfull, a `SuccessfulPaymentSignal` will be
    sent. If the payment is not successful, a `FailedPaymentSignal`
    will be sent.
    """
    procedures.make_payment_order(
        payee_creditor_id,
        offer_id,
        urlsafe_b64decode(offer_secret),
        payer_creditor_id,
        payer_payment_order_seqnum,
        debtor_id,
        amount,
        urlsafe_b64decode(proof_secret),
        payer_note,
    )


@broker.actor(queue_name=APP_QUEUE_NAME, event_subscription=True)
def on_prepared_payment_transfer_signal(
        debtor_id: int,
        sender_creditor_id: int,
        transfer_id: int,
        coordinator_type: str,
        recipient_creditor_id: int,
        sender_locked_amount: int,
        prepared_at_ts: str,
        coordinator_id: int,
        coordinator_request_id: int) -> None:
    # Event handler: a payment transfer has been prepared by the accounting
    # subsystem; only 'payment'-coordinated transfers are expected here.
    assert coordinator_type == 'payment'
    procedures.process_prepared_payment_transfer_signal(
        debtor_id,
        sender_creditor_id,
        transfer_id,
        recipient_creditor_id,
        sender_locked_amount,
        coordinator_id,
        coordinator_request_id,
    )


@broker.actor(queue_name=APP_QUEUE_NAME, event_subscription=True)
def on_rejected_payment_transfer_signal(
        coordinator_type: str,
        coordinator_id: int,
        coordinator_request_id: int,
        details: dict) -> None:
    # Event handler: a payment transfer was rejected before preparation.
    assert coordinator_type == 'payment'
    procedures.process_rejected_payment_transfer_signal(
        coordinator_id,
        coordinator_request_id,
        details,
    )


# TODO: Handle the `FinalizedTransferSignal` signal.
997,640
aec5b62cbcf55a1c05716cf4fba5442256c0fb88
import uuid
import pytest
from .pages.product_page import ProductPage
from .pages.login_page import LoginPage
from .pages.basket_page import BasketPage
from .pages.main_page import MainPage
import time

# NOTE: a parametrized variant of the class below, iterating over the
# ?promo=offer0 .. ?promo=offer9 product links, used to live here as
# commented-out code; see VCS history to restore it.


@pytest.mark.users_tests
class TestUserAddToBasketFromProductPage(object):
    """Basket scenarios that run as a freshly-registered, logged-in user."""

    @pytest.fixture(scope="function", autouse=True)
    def setup(self, browser):
        # Register a brand-new user before every test in this class.
        link = "http://selenium1py.pythonanywhere.com/"
        self.page = MainPage(browser, link)
        self.page.open()
        self.page.go_to_login_page()
        self.login_page = LoginPage(browser, browser.current_url)
        email = str(time.time()) + "@stepikmail.org"  # unique per run
        password = uuid.uuid4().hex
        self.login_page.register_new_user(email, password)

    def test_user_cant_see_success_message(self, browser):
        link = "http://selenium1py.pythonanywhere.com/catalogue/the-shellcoders-handbook_209"
        page = ProductPage(browser, link)
        page.open()
        page.should_not_be_success_message() # call is_not_element_present

    @pytest.mark.need_review
    def test_user_can_add_product_to_basket(self, browser):
        link = "http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/?promo=newYear2019"
        page = ProductPage(browser, link)
        page.open()
        page.should_be_btn()
        page.click_btn_add_to_basket()
        page.solve_quiz_and_get_code()  # promo pages pop a math-quiz alert
        page.should_be_product_added_to_basket_message()
        page.should_be_basket_total_message()


@pytest.mark.need_review
def test_guest_can_add_product_to_basket(browser):
    link = "http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/?promo=newYear2019"
    page = ProductPage(browser, link)
    page.open()
    page.should_be_product()
    page.add_product_to_basket()
    page.solve_quiz_and_get_code()
    page.should_be_product_added_to_basket_message()
    page.should_be_basket_total_message()


# Expected to fail: the success message IS shown after adding to basket.
@pytest.mark.xfail
def test_guest_cant_see_success_message_after_adding_product_to_basket(browser):
    link = "http://selenium1py.pythonanywhere.com/catalogue/the-shellcoders-handbook_207"
    page = ProductPage(browser, link)
    page.open()
    page.click_btn_add_to_basket()
    page.should_not_be_success_message() # call is_not_element_present


def test_guest_cant_see_success_message(browser):
    link = "http://selenium1py.pythonanywhere.com/catalogue/the-shellcoders-handbook_207"
    page = ProductPage(browser, link)
    page.open()
    page.should_not_be_success_message() # call is_not_element_present


# Expected to fail: the message does not disappear on this site.
@pytest.mark.xfail
def test_message_dissapeared_after_adding_product_to_basket(browser):
    link = "http://selenium1py.pythonanywhere.com/catalogue/the-shellcoders-handbook_207"
    page = ProductPage(browser, link)
    page.open()
    page.click_btn_add_to_basket()
    page.should_success_message_disappeared()


def test_guest_should_see_login_link_on_product_page(browser):
    link = "http://selenium1py.pythonanywhere.com/en-gb/catalogue/the-city-and-the-stars_95/"
    page = ProductPage(browser, link)
    page.open()
    page.should_be_login_link()


@pytest.mark.need_review
def test_guest_can_go_to_login_page_from_product_page(browser):
    link = "http://selenium1py.pythonanywhere.com/"
    page = ProductPage(browser, link)
    page.open()
    page.should_be_login_link()
    page.go_to_login_page()
    login_page = LoginPage(browser, browser.current_url)
    login_page.should_be_login_page()


@pytest.mark.need_review
def test_guest_cant_see_product_in_basket_opened_from_product_page(browser):
    link = "http://selenium1py.pythonanywhere.com/catalogue/the-shellcoders-handbook_209?promo=midsummer"
    page = ProductPage(browser, link)
    page.open()
    page.should_be_product()
    page.go_to_basket_top()
    basket_page = BasketPage(browser, browser.current_url)
    basket_page.should_be_busket()
    basket_page.should_not_be_items_in_basket()
    basket_page.should_be_basket_is_empty_message()
997,641
2111a72d2f61d601bc91f6fd7cbf1d176e100e01
from entity_linkage.normalization.entity_normalization import EntityNormalization
from entity_linkage.normalization.sgtb import read_data
from entity_linkage.normalization.sgtb.structured_gradient_boosting import StructuredGradientBoosting
import gzip, time, os, sys
from sklearn.externals import joblib


class StructuredGradientTreeBoosting(EntityNormalization):
    """Entity normalization backed by a Structured Gradient Tree Boosting
    (SGTB) classifier; all public methods are invoked on the class itself.

    NOTE(review): `load_model` and `_read_feat` are missing @classmethod
    decorators, which is why `train` passes `cls` explicitly; confirm the
    intended calling convention before relying on them.
    """

    @classmethod
    def read_dataset(cls, dataset_name, split_ratio, options={}):  # NOTE(review): mutable default arg
        '''
        :param dataset_name (str): name of dataset
        :param split_ratio (tuple): (train_ratio, validation_ration, test_ratio)
        :param kwargs: other parameters for specific model (optional)
        :return (tuple): train_data, valid_data, test_data
        '''
        train_data, valid_data, test_data = read_data.read_data(dataset_name, split_ratio, options)
        return train_data, valid_data, test_data

    @classmethod
    def train(cls, train_dev_set):
        '''
        :param train_set (list): a list of training data
        :return (Model): trained model
        '''
        train_set, dev_set = train_dev_set
        # _read_feat lacks @classmethod, so the class must be passed in.
        ent_ent_feat_dict = cls._read_feat(cls)
        print("Loading features... Finished!")
        print("Start training...")
        clf = StructuredGradientBoosting(max_depth=3,
                                         learning_rate=1.0,
                                         n_estimators=250,
                                         min_samples_split=2,
                                         min_samples_leaf=1,
                                         ent_ent_feat_dict=ent_ent_feat_dict,
                                         beam_width=4,
                                         num_thread=8)
        start_time = time.time()
        clf = clf.fit(train_set, dev_set)
        end_time = time.time()
        print ("Training take %.2f secs" %(end_time - start_time))
        return clf

    @classmethod
    def predict(cls, clf, test_set):
        '''
        :param model (Model): a trained model
        :param test_set (list): a list of test data
        :return (list): a list of prediction, each item with the format
        (entity_name, wikipedia_url(optional), geolocation_url(optional), geolocation_boundary(optional))
        '''
        if clf is None:
            raise Exception("model is neither trained nor loaded")
        #find ditk_path from sys.path
        ditk_path = ""
        for path in sys.path:
            if "ditk" in path:
                ditk_path = path
        test_X, _, test_indices, test_ent_ids = test_set
        test_pred = clf.predict(test_X, test_indices, test_ent_ids)
        # Pair each gold label with its prediction; geo fields are unused here.
        output = []
        for label, pred in zip(test_indices[0][1], test_pred):
            output.append((label, pred, "", ""))
        output_file = ditk_path+"/entity_linkage/normalization/sgtb/result/output.txt"
        if not os.path.exists(os.path.dirname(output_file)):
            os.makedirs(os.path.dirname(output_file), exist_ok=True)
        with open(output_file, "w") as f:
            for entity, wiki_url, geo_url, geo_bnd in output:
                f.write(entity + ", " + wiki_url + ", " + geo_url + ", " + geo_bnd + "\n")
        return output

    """ TODO:
    1. implement F1 metrics that originally not available
    """
    @classmethod
    def evaluate(cls, clf, eval_set):
        '''
        :param model (Model): a trained model
        :param eval_set (list): a list of validation data
        :return (tuple): (precision, recall, f1 score)
        '''
        # NOTE(review): despite the docstring, this returns a single accuracy
        # value (see the TODO above).
        if clf is None:
            raise Exception("model is neither trained nor loaded")
        eval_X, eval_y, eval_indices, eval_ent_ids = eval_set
        eval_acc = clf.get_acc(eval_X, eval_y, eval_indices, eval_ent_ids)
        print ("Test acc %.2f" %(eval_acc))
        return eval_acc

    @classmethod
    def save_model(cls, clf, file_name):
        # Persist the trained classifier with joblib, creating parent dirs.
        print("start saving model...")
        if not os.path.exists(os.path.dirname(file_name)):
            os.makedirs(os.path.dirname(file_name), exist_ok=True)
        joblib.dump(clf, file_name)
        print("Finished saving model!")

    def load_model(cls, file_name):
        # NOTE(review): missing @classmethod decorator (see class docstring).
        # Falls back to a freshly-initialized model when the file is absent.
        if not os.path.exists(file_name):
            print("model not exists... init model...")
            ent_ent_feat_dict = cls._read_feat()
            clf = StructuredGradientBoosting(max_depth=3,
                                             learning_rate=1.0,
                                             n_estimators=250,
                                             min_samples_split=2,
                                             min_samples_leaf=1,
                                             ent_ent_feat_dict=ent_ent_feat_dict,
                                             beam_width=4,
                                             num_thread=8)
            print("Finished initiating model!")
        else:
            print("start loading model...")
            clf = joblib.load(file_name)
            print("Finished loading model!")
        return clf

    def _read_feat(cls):
        # NOTE(review): missing @classmethod decorator (see class docstring).
        # Loads the gzip'd entity-entity feature table into a dict keyed by
        # (entity1, entity2) byte-string pairs.
        print("Loading features...")
        #find ditk_path from sys.path
        ditk_path = ""
        for path in sys.path:
            if "ditk" in path:
                ditk_path = path
        # entity-entity features
        feat_file = ditk_path+"/entity_linkage/normalization/sgtb/data/ent_ent_feats.txt.gz"
        ent_ent_feat_dict = {}
        with gzip.open(feat_file, 'rb') as f:
            for line in f:
                ep, feat_str = line.split(b'\t')
                e1, e2 = ep.split()
                feats = [float(x) for x in feat_str.split()]
                ent_ent_feat_dict[(e1,e2)] = feats
        return ent_ent_feat_dict
997,642
694baf50fa6a67c166a4ae7fadaaa5bdffdaed0d
"""Launcher for the currently enabled fund-notice spiders.

Bug fixed: ``scrapy.cmdline.execute`` calls ``sys.exit`` when the crawl
finishes, so chaining several ``execute(...)`` calls in one process only
ever ran the FIRST enabled spider — every call after it was dead code.
Each spider is now launched in its own child process so all of them run
to completion, sequentially, in the listed order.

The long backlog of disabled spiders that used to live here as
commented-out ``execute`` lines has been collapsed into the single list
below; enable a spider by adding its name.
"""
import subprocess
import sys

# Spider names currently enabled, in the original execution order.
ACTIVE_SPIDERS = [
    "FundNotice_BieJingYouRuiChiInvest",    # Beijing YouRuiChi Invest - notices
    "FundNotice_BoYangInvestNotice",        # BoYang Invest - notices
    "FundNotice_DingLiInvestNotice",        # DingLi Invest - notices
    "FundNotice_DaBoDiInvestNotice",        # DaBoDi Invest - notices
    "FundNotice_BaiRuiTurstNotice",         # BaiRui Trust - notices
    "FundNotice_AnHuiXiangHaiAssetNotice",  # Anhui XiangHai Asset - notices
    "FundNotice_NanHuaFutureNotice",        # NanHua Futures - notices (auto-login site)
]


def run_spider(name, job_id="0L"):
    """Run one spider in a child process and return its exit code.

    A subprocess is required because a finished Scrapy reactor cannot be
    restarted inside the same interpreter.
    """
    cmd = [sys.executable, "-m", "scrapy", "crawl", name, "-a", "jobId=" + job_id]
    return subprocess.run(cmd, check=False).returncode


if __name__ == "__main__":
    for spider in ACTIVE_SPIDERS:
        run_spider(spider)
997,643
23119007af7ace382e6e58ff48d48be6089c6b17
from lib.boogie.ast import stmt_changed, AstAssignment, AstId, AstHavoc, \
        AstAssert, AstTrue, replace
from lib.boogie.z3_embed import Or, And, Int, And, stmt_to_z3, \
        AllIntTypeEnv, satisfiable, model
from lib.boogie.bb import BB
from lib.boogie.ssa import SSAEnv, is_ssa_str
from lib.boogie.predicate_transformers import wp_stmts, sp_stmts

# Shapes of the path datatypes handled below:
#BB_PATH = [ BB_LABEL ]
#NONDET_BB_PATH = [ (BB | [ NONDET_BB_PATH ] ) ]
#NONDET_SSA_BB_PATH = [ (BB, [ REPL_M ]) |
#                       ( CHOICE_VAR, [ NONDET_SSA_BB_PATH ] ) ]

def nd_bb_path_to_ssa(p, bbs, ssa_env, cur_p = ""):
    """Convert a nondeterministic BB path `p` to SSA form.

    Returns (path, ssa_env) where path is a NONDET_SSA_BB_PATH: each plain
    BB label becomes (label, [replacement maps — one per statement boundary])
    and each nondeterministic split becomes (choice_var, [ssa'd subpaths]).
    `ssa_env` is mutated in place as variables are assigned new SSA names.
    `cur_p` is a string uniquely identifying the position in the path tree;
    it is used to generate unique choice-variable and union-block names.
    """
    path = []
    for ind, arg in enumerate(p):
        if isinstance(arg, str):
            # Plain basic-block label: bump the SSA version of every
            # variable each statement changes, recording the replacement
            # map before the block and after each statement.
            repl_ms = [ ssa_env.replm() ]
            for stmt in bbs[arg].stmts:
                for name in stmt_changed(stmt):
                    ssa_env.update(name)
                    _ = ssa_env.lookup(name)
                repl_ms.append(ssa_env.replm())
            path.append((arg, repl_ms))
        else:
            # Nondeterministic split: a list of alternative subpaths.
            tmp = []
            choice_var = "_split_" + cur_p + "." + str(ind)
            # Build each SSA-ed subpath (each in a child SSAEnv with a
            # unique suffix so their SSA names cannot collide).
            for nsplit, subp in enumerate(arg):
                suffix = cur_p + "." + str(ind) + "." + str(nsplit) + "."
                ssaed_subpath = \
                    nd_bb_path_to_ssa(subp, bbs, SSAEnv(ssa_env, suffix),
                                      cur_p + suffix)
                tmp.append(ssaed_subpath)
            # Compute the set of variables changed across ALL paths
            changed = set()
            for (_, sub_env) in tmp:
                changed.update(sub_env.changed())
            # Compute their ssa name BEFORE the paths
            old_varm = { s : ssa_env.lookup(s) for s in changed }
            # Make sure each of them is upded in the environment AFTER the paths
            for s in changed:
                ssa_env.update(s)
            # For each sub-path add a "union" block at the end
            # that makes sure the SSA-ed names of all changed variables
            # across all paths match up
            for (nsplit, (subp, sub_env)) in enumerate(tmp):
                bb_name = "_union_" + cur_p + "." + str(ind) + "." + \
                    str(nsplit)
                bb_stmts = []
                bb_replmps = [ sub_env.replm() ]
                for s in changed:
                    if (s in sub_env.changed()):
                        # Variable changed on this subpath: use its final
                        # name there, then drop it from the child env.
                        old_var = sub_env.lookup(s)
                        sub_env.remove(s)
                    else:
                        # Unchanged on this subpath: use the pre-split name.
                        old_var = old_varm[s]
                    bb_stmts.append(AstAssignment(AstId(ssa_env.lookup(s)),
                                                  AstId(old_var)))
                    bb_replmps.append(sub_env.replm())
                bb = BB(set(), bb_stmts, set())
                bbs[bb_name] = bb
                subp.append((bb_name, bb_replmps))
            # NOTE: `map` here is lazy on Python 3; this module also uses
            # `xrange` below, so it appears to target Python 2.
            path.append((choice_var, map(lambda x: x[0], tmp)))
    return (path, ssa_env)

def ssa_stmt(stmt, prev_replm, cur_replm):
    """SSA a single statement given the replacement maps before/after it."""
    # Havoc's turn into no-ops when SSA-ed.
    if isinstance(stmt, AstHavoc):
        return AstAssert(AstTrue());
    if isinstance(stmt, AstAssignment):
        # LHS gets the NEW ssa name, RHS is read in the OLD environment.
        return AstAssignment(replace(stmt.lhs, cur_replm),
                             replace(stmt.rhs, prev_replm))
    else:
        return replace(stmt, cur_replm)

def _ssa_stmts(stmts, envs):
    # envs has len(stmts)+1 entries: map before stmt i is envs[i],
    # map after it is envs[i+1].  (`xrange`: Python 2.)
    return [ssa_stmt(stmts[i], envs[i], envs[i+1])
            for i in xrange(0, len(stmts))]

def ssa_path_to_z3(ssa_path, bbs):
    """Encode an SSA path as a Z3 formula: splits become a disjunction
    tagged by the integer choice variable; blocks become conjunctions of
    their SSA-ed statements."""
    def f(arg):
        if (arg[0].startswith("_split_")):
            split_var = arg[0]
            return Or([And((Int(split_var) == ind), ssa_path_to_z3(x, bbs))
                       for ind, x in enumerate(arg[1])])
        else:
            return And([stmt_to_z3(stmt, AllIntTypeEnv())
                        for stmt in _ssa_stmts(bbs[arg[0]].stmts, arg[1])])
    return And(map(f, ssa_path))

def is_nd_bb_path_possible(bbpath, bbs):
    """True iff some resolution of the nondeterministic path is satisfiable."""
    nd_ssa_p, _ = nd_bb_path_to_ssa(bbpath, bbs, SSAEnv(None, ""))
    return satisfiable(ssa_path_to_z3(nd_ssa_p, bbs))

def extract_ssa_path_vars(ssa_p, m):
    """Given an SSA path and a Z3 model `m`, rebuild the concrete path:
    a list of (bb_label, [env per statement boundary]) with original
    (non-SSA) variable names mapped to their model values.  Splits are
    resolved by reading the choice variable's value from the model;
    synthetic _union_ blocks are filtered out of the result."""
    # Names in the model that are neither SSA-renamed nor choice vars
    # (i.e. the procedure's arguments) appear in every environment.
    argsS = set([str(x) for x in m
                 if (not is_ssa_str(str(x)) and '_split_' not in str(x))])
    def _helper(ssa_p):
        concrete_ssa_path = []
        for (_, arg) in enumerate(ssa_p):
            if (arg[0].startswith("_split_")):
                choice_var, nd_paths = arg
                # The model fixes which branch was taken.
                taken_ssa_path = nd_paths[m[choice_var]]
                concrete_ssa_path.extend(_helper(taken_ssa_path))
            else:
                (bb, repl_ms) = arg
                envs = []
                for repl_m in repl_ms:
                    vs = set(map(str, repl_m.keys())).union(argsS)
                    # Map each original name to the model value of its
                    # SSA name at this point (None if absent from m).
                    new_env = { orig_name : m.get(ssa_name, None)
                                for (orig_name, ssa_name) in
                                [(x, str(repl_m.get(AstId(x), x)))
                                 for x in vs ] }
                    envs.append(new_env);
                concrete_ssa_path.append((bb,envs))
        return concrete_ssa_path
    return [x for x in _helper(ssa_p) if '_union_' not in x[0]]

def get_path_vars(bbpath, bbs):
    """Find a satisfying model for `bbpath` and return the concrete
    per-block variable environments along it."""
    ssa_p, _ = nd_bb_path_to_ssa(bbpath, bbs, SSAEnv(None, ""))
    m = model(ssa_path_to_z3(ssa_p, bbs))
    return extract_ssa_path_vars(ssa_p, m);

def wp_nd_ssa_path(ssa_p, bbs, pred, typeEnv):
    """Weakest precondition of `pred` over the SSA path (walked backwards);
    a split contributes the disjunction of its subpaths' wps."""
    for arg in reversed(ssa_p):
        if (arg[0].startswith("_split_")):
            pred = Or([wp_nd_ssa_path(subp, bbs, pred, typeEnv)
                       for subp in arg[1]])
        else:
            pred = wp_stmts(_ssa_stmts(bbs[arg[0]].stmts, arg[1]),
                            pred, typeEnv)
    return pred

def sp_nd_ssa_path(ssa_p, bbs, pred, typeEnv):
    """Strongest postcondition of `pred` over the SSA path (walked forwards);
    a split contributes the disjunction of its subpaths' sps."""
    for arg in ssa_p:
        if (arg[0].startswith("_split_")):
            pred = Or([sp_nd_ssa_path(subp, bbs, pred, typeEnv)
                       for subp in arg[1]])
        else:
            pred = sp_stmts(_ssa_stmts(bbs[arg[0]].stmts, arg[1]),
                            pred, typeEnv)
    return pred
997,644
37b18f5ee7c926135cc0b8c75f53e82b81c003d5
import numpy as np import matplotlib.pyplot as plt import matplotlib.animation as animation import sys, os, glob if len(sys.argv) < 3: print('Usage:') print(' python3 {} <input file> <solution prefix>'.format(sys.argv[0])) sys.exit(0) input_file = sys.argv[1] sol_file = sys.argv[2] with open(input_file,'r') as f: xlen, ylen, h = (float(x) for x in f.readline().split()); tc, th = (float(x) for x in f.readline().split()); print("Input file processed: " + input_file) files = glob.glob(sol_file+"*") filenames = [os.path.split(x)[1] for x in files] y, x = np.mgrid[0.:ylen+h:h, 0.:xlen+h:h] fig = plt.figure() ims = [] for file in filenames: sol = np.loadtxt(file) ims.append((plt.pcolor(x, y, sol, cmap='jet', vmin=tc, vmax=th),)) im_ani = animation.ArtistAnimation(fig, ims, interval=200, repeat_delay=1000, blit=True) plt.show()
997,645
3b61f16f8063b29b01d139b8447479c7db86f301
''' Fornecido um número inteiro n (n≥10), exibir o valor correspondente aos dois dígitos mais à direita de n, sem utilizar o operador de resto. ''' n = int(input("n: ")) print(n - (n // 100 * 100))
997,646
918352b49a5db2011d01ac5bfedc18d61db13563
#!/usr/bin/env python
"""Drive a small fleet of UAS worker threads through a square flight pattern.

Defects fixed relative to the original:
  * takeoff_all_UAS() was called with no argument but declared with one.
  * wait_till_all_UAS_unlocked() was called as (num_of_UAS, enum_UAS) but
    declared with a single parameter.
  * check_for_guided_mode() had no body (a syntax error) — now an explicit
    TODO stub.
  * The "wait for all" loops reset pass_flag to True inside the per-vehicle
    loop, so effectively only the last vehicle was checked; replaced with
    all()/any() predicates over the whole fleet.

Targets Python 2 (raw_input) as the original did.
"""
import sys
import platform
import serial.tools.list_ports
import time
from dronekit.mavlink import MAVConnection
from pymavlink import mavutil, mavwp
from dronekit import connect
import classes
import threading
from threading import Thread
from subprocess import Popen

# Opcodes understood by the classes.UAS mailbox protocol.
CLOSE_MSG = "1"
INIT_MSG = "2"
ARM_MSG = "3"
TAKEOFF_MSG = "4"
LANDDISARM_MSG = "5"
MOVE_NED_MSG = "6"
MOVE_GPS_MSG = "7"

# Friendly per-vehicle names, indexed by UAS number.
man_name = ("Rick", "Morty", "Jerry", "Beth", "Summer",
            "snuggles/Snowball", "Mr. PB", "Amish Cyborg")
# Hardware connection endpoints used when not simulating.
select_ports = ("25.118.135.27:5770", "25.118.135.27:5760", "COM11")

move_north = 10
move_east = 10
move_down = 0

enum_UAS = []    # classes.UAS worker threads, one per vehicle
num_of_UAS = 2   # fleet size


# initializer functions

def initialize_all_UAS(enum_UAS, simulation=False, sequentially=False):
    """Create num_of_UAS worker threads, send each an INIT message and block
    until every vehicle reports it is initialized."""
    # Initialize instances of the UAS class and start their threads.
    for i in range(num_of_UAS):
        t = classes.UAS()
        enum_UAS += [t]
        t.start()
    for i in range(num_of_UAS):
        if simulation:
            enum_UAS[i].mailbox.put((INIT_MSG, i, 57600,
                                     "UAS IP unused in sim",
                                     "udp:127.0.0.1:1455%d" % i,
                                     "udp:127.0.0.1:1555%d" % i,
                                     True, man_name[i]))
        else:
            enum_UAS[i].mailbox.put((INIT_MSG, i, 57600, select_ports[i],
                                     "udp:127.0.0.1:1455%d" % i,
                                     "udp:127.0.0.1:1555%d" % i,
                                     False, man_name[i]))
        # Option to initialize sequentially (wait for each before the next).
        if sequentially:
            while not enum_UAS[i].get_UAS_init():
                time.sleep(2)
    # Wait until ALL UAS are initialized (fixed: checks every vehicle).
    while not all(u.get_UAS_init() for u in enum_UAS):
        time.sleep(1)


def arm_all_UAS():
    """Send ARM to every UAS and block until all report armed."""
    for i in range(num_of_UAS):
        enum_UAS[i].mailbox.put((ARM_MSG, ))
    while not all(u.get_UAS_armed() for u in enum_UAS):
        time.sleep(0.5)


def takeoff_all_UAS(enum_UAS):
    """Command a 10 m takeoff on every UAS and block until all are
    ready-to-fly (RTF)."""
    for i in range(num_of_UAS):
        enum_UAS[i].mailbox.put((TAKEOFF_MSG, 10))
    while not all(u.get_UAS_RTF() for u in enum_UAS):
        time.sleep(1)


# utility functions

def wait_till_all_UAS_unlocked(enum_UAS):
    """Block while any UAS still holds its movement lock."""
    while any(u.get_UAS_locked() for u in enum_UAS):
        time.sleep(1)


def check_for_guided_mode(number):
    """TODO: verify vehicle `number` is in GUIDED mode.

    The original definition had no body (a syntax error); implement before
    relying on it.
    """
    pass


def change_groundspeed(number, speed):
    """Set the groundspeed (m/s) of one vehicle."""
    enum_UAS[number].set_groundspeed(speed)


def change_all_groundspeed(speed):
    """Set the groundspeed (m/s) of the whole fleet."""
    for i in range(num_of_UAS):
        change_groundspeed(i, speed)


# movement functions

def move_UAS_NED(number, dNorth, dEast, dDown):
    """Send a relative NED move to one vehicle (dDown negative = climb)."""
    enum_UAS[number].mailbox.put((MOVE_NED_MSG, dNorth, dEast, dDown))


def move_all_NED(dNorth, dEast, dDown):
    """Send the same relative NED move to every vehicle."""
    for i in range(num_of_UAS):
        enum_UAS[i].mailbox.put((MOVE_NED_MSG, dNorth, dEast, dDown))


# ending functions

def land_UAS(number):
    enum_UAS[number].mailbox.put((LANDDISARM_MSG,))


def land_all():
    for i in range(num_of_UAS):
        enum_UAS[i].mailbox.put((LANDDISARM_MSG,))


def close_UAS(number):
    enum_UAS[number].stop()


def close_all_UAS():
    for i in range(num_of_UAS):
        enum_UAS[i].stop()


# --- mission: fly a square (north, east, south, west), then land ---

initialize_all_UAS(enum_UAS, simulation=True)
arm_all_UAS()
takeoff_all_UAS(enum_UAS)  # FIX: was called without its required argument

# move north (and climb 10 m)
move_all_NED(20, 0, -10)
wait_till_all_UAS_unlocked(enum_UAS)  # FIX: was called with (num_of_UAS, enum_UAS)
# move east
move_all_NED(0, 20, 0)
wait_till_all_UAS_unlocked(enum_UAS)
# move south
move_all_NED(-20, 0, 0)
wait_till_all_UAS_unlocked(enum_UAS)
# move west
move_all_NED(0, -20, 0)
wait_till_all_UAS_unlocked(enum_UAS)

# land
land_all()

# wait for the windows to be closed
raw_input("Press Enter to continue...")
close_all_UAS()
Popen("powershell.exe kill -ProcessName apm", shell=False)
Popen("powershell.exe kill -ProcessName mavproxy", shell=False)
997,647
a02759df6c42560a50357e9301bf01bde3fcd8fa
import pytestemb as test def case_01(): test.assert_true(1==1, "1==1") test.assert_true(1==2, "1==2") def case_02(): test.assert_true(1==1, "1==1") test.assert_true(1==2, "1==2") def case_03(): test.assert_true(1==1, "1==1") test.assert_true(1==2, "1==2") def case_04(): test.assert_true(1==1, "1==1") test.assert_true(1==2, "1==2") def case_05(): test.assert_true(1==1, "1==1") test.assert_true(1==2, "1==2") def case_06(): test.assert_true(1==1, "1==1") test.assert_true(1==2, "1==2") def case_07(): test.assert_true(1==1, "1==1") test.assert_true(1==2, "1==2") def case_08(): test.assert_true(1==1, "1==1") test.assert_true(1==2, "1==2") def case_09(): test.assert_true(1==1, "1==1") test.assert_true(1==2, "1==2") def case_10(): test.assert_true(1==1, "1==1") test.assert_true(1==2, "1==2") if __name__ == "__main__": test.add_test_case(case_01) test.add_test_case(case_02) test.add_test_case(case_03) test.add_test_case(case_04) test.add_test_case(case_05) test.add_test_case(case_06) test.add_test_case(case_07) test.add_test_case(case_08) test.add_test_case(case_09) test.add_test_case(case_10) test.run_script()
997,648
d2bca9d5c528f22fc793fc16e5621fe2a7ba09f0
import re def change_variable_to_asn_style(variable): return re.sub("_", "-", variable) def change_variable_to_python_style(variable): return re.sub("-", "_", variable) def get_supported_messages_in_modules(file): supported_msgs_in_modules = {} lines = [] with open(file, "r") as fd: for line in fd.readlines(): if len(line.strip()) == 0: continue lines.append(line) for module in re.findall(r"[\w\_]+", lines[-1])[1:]: supported_msgs_in_modules[change_variable_to_asn_style(module)] = [] patterns = ((r"class\s([\w\_]+)\:", "class"), (r"([\w\_]+)\s*=\s*SEQ\(name=\'\S+\',\smode=MODE_TYPE\)", "sequence")) current_module = '' for line in lines: for pattern in patterns: matched = re.match(pattern[0], line.strip()) if matched: if pattern[1] == 'class': current_module = change_variable_to_asn_style(matched.group(1)) elif pattern[1] == 'sequence': supported_msgs_in_modules[current_module].append(change_variable_to_asn_style(matched.group(1))) else: pass break return supported_msgs_in_modules def reformat_asn_line(line): words = re.findall(r"\([^\(\)]*\(.*?\)[^\(\)]*\)|\(.*?\)|[\w\-]+|[\:\=]+|\{|\}|,", line) new_lines = [] indent = 0 new_line = ' ' * indent for i in range(len(words)): if words[i] == "{": new_line += words[i] indent += 4 new_lines.append(new_line) new_line = ' ' * indent elif words[i] == "}": indent -= 4 if i > 0 and words[i-1] == "{": new_line = new_lines[-1] new_line += (words[i] + " ") del new_lines[-1] else: new_lines.append(new_line) new_line = (' ' * indent) + words[i] + " " else: if i > 0 and words[i-1] == ",": new_lines.append(new_line) new_line = (' ' * indent) + words[i] + " " else: new_line += (words[i] + " ") if new_line.strip() != '': new_lines.append(new_line) return "\n".join(new_lines)
997,649
a9a27a892fd39ec05738649bc02db00d7970e959
from __future__ import print_function, division from typing import Optional import numpy as np from .augmentor import DataAugment class CutNoise(DataAugment): r"""3D CutNoise data augmentation. Randomly add noise to a cuboid region in the volume to force the model to learn denoising when making predictions. This augmentation is only applied to images. Args: length_ratio (float): the ratio of the cuboid length compared with volume length. mode (string): the distribution of the noise pattern. Default: ``'uniform'``. scale (float): scale of the random noise. Default: 0.2. p (float): probability of applying the augmentation. Default: 0.5 additional_targets(dict, optional): additional targets to augment. Default: None """ def __init__(self, length_ratio: float = 0.25, mode: str = 'uniform', scale: float = 0.2, p: float = 0.5, additional_targets: Optional[dict] = None, skip_targets: list = []): super(CutNoise, self).__init__(p, additional_targets, skip_targets) self.length_ratio = length_ratio self.mode = mode self.scale = scale def set_params(self): r"""There is no change in sample size. 
""" pass def cut_noise(self, images, zl, zh, yl, yh, xl, xh, noise): zdim = images.shape[0] if zdim == 1: temp = images[:, yl:yh, xl:xh].copy() else: temp = images[zl:zh, yl:yh, xl:xh].copy() temp = temp + noise temp = np.clip(temp, 0, 1) if zdim == 1: images[:, yl:yh, xl:xh] = temp else: images[zl:zh, yl:yh, xl:xh] = temp return images def random_region(self, vol_len, random_state): cuboid_len = int(self.length_ratio * vol_len) low = random_state.randint(0, vol_len-cuboid_len) high = low + cuboid_len return low, high def get_random_params(self, images, random_state): zdim = images.shape[0] zl, zh = None, None if zdim > 1: zl, zh = self.random_region(images.shape[0], random_state) yl, yh = self.random_region(images.shape[1], random_state) xl, xh = self.random_region(images.shape[2], random_state) z_len = zh - zl if zdim > 1 else 1 noise_shape = (z_len, yh-yl, xh-xl) noise = random_state.uniform(-self.scale, self.scale, noise_shape) return zl, zh, yl, yh, xl, xh, noise def __call__(self, sample, random_state=np.random.RandomState()): images = sample['image'].copy() random_params = self.get_random_params(images, random_state) sample['image'] = self.cut_noise(images, *random_params) for key in self.additional_targets.keys(): if key not in self.skip_targets and self.additional_targets[key] == 'img': sample[key] = self.cut_noise(sample[key].copy(), *random_params) return sample
997,650
a1b52d906b1d86ecff4a697ba361c5a9e1816fd7
import re class Solution: # @param {string} s # @return {boolean} def isPalindrome(self, s): letters = map(lambda x: x.lower(), re.findall('[a-zA-Z0-9]', s)) count = len(letters) if count <= 1: return True i = 0 j = count-1 while i < j: if letters[i] != letters[j]: return False i += 1 j -= 1 return True
997,651
5ffb98855e4faa2401ea0077e317574683dadc3d
import numpy as np from torch.utils.data.dataset import Dataset from torchvision import transforms import pandas as pd from sklearn.preprocessing import MinMaxScaler import matplotlib.pyplot as plt def stat(light): print(light.max(), light.min(), light.mean(), light.var()) # light = np.genfromtxt('20180827_Mouse1_reshape.csv', delimiter=',') # stat(light) # light=light.clip(50,1000) # stat(light) # scaler = MinMaxScaler(feature_range=(0, 1)) # print(scaler.fit(light)) # scaled_light=scaler.transform(light) # stat(scaled_light) # light = light.reshape(498, 29, 259) scaler = MinMaxScaler(feature_range=(0, 1)) class CustomDatasetFromCSV(Dataset): def __init__(self, mode, transforms=None): # 498*7511 self.data = np.genfromtxt('20180827_Mouse1_reshape.csv', delimiter=',') self.labels = np.genfromtxt('20180904_tag.csv', delimiter=',') self.labels-=1 self.transforms = transforms #498*7511 (0,1) print(scaler.fit(self.data)) self.data = self.data.clip(50, 1000) self.data = scaler.transform(self.data) if mode == "train": self.data = self.data[:400, :] self.labels = self.labels[:400] elif mode == "test": self.data = self.data[400:, :] self.labels = self.labels[400:] else: print("wrong mode!") def __getitem__(self, index): single_image_label = self.labels[index] img_as_img = (self.data[index]).reshape(29, 259) # 7511 (0,1) -> 29*259 (-1,1) if self.transforms is not None: img_as_tensor = self.transforms(img_as_img) return (img_as_tensor, single_image_label) def __len__(self): return (self.data.shape[0])
997,652
5f28a55b88622b64c5483e6880153dd921a6685e
from django.shortcuts import render from concept_the_register.titles.models import Title from rest_framework import viewsets from concept_the_register.titles.serializers import TitleSerializer # Create your views here. class TitlesViewSet(viewsets.ModelViewSet): """ API endpoint that allows Titles to be viewed or edited. """ queryset = Title.objects.all() serializer_class = TitleSerializer
997,653
ffb8938d639475b7476c32a696d15fd9e41f5e74
import os
# Root path of the project whose files will be converted
import re

global path
path = "/Users/leewoojin/Desktop/isoi-opti"

# Files selected for conversion
global sourceFileList
sourceFileList = []

# Directory list collected by scanDirectory
global directoryPathList
directoryPathList = []

# File extensions that are eligible for conversion
global __AllowFileExtensionList
__AllowFileExtensionList = ["js", "JS", "txt", "TXT"]


def scanDirectory(dirPath):
    """Recursively collect sub-directories of dirPath into directoryPathList.

    Skips node_modules, .DS_Store, and any entry whose name contains a dot
    (i.e. plain files), so only directories are recorded.
    """
    filenames = os.listdir(dirPath)
    for filename in filenames:
        filePath = os.path.join(dirPath, filename)
        if filename.find("node_modules") >= 0 or filename.find("DS_Store") >= 0 or filename.find(".") >= 0:
            continue
        else:
            directoryPathList.append(filePath)
            scanDirectory(filePath)
    print("[LOG] directory list. ", directoryPathList)
    return


def scanSourceFiles():
    """Collect convertible files from every scanned directory.

    A file qualifies when its extension is in __AllowFileExtensionList and it
    is not a config file, .DS_Store, _app.js or _document.js.
    """
    for dirPath in directoryPathList:
        filenames = os.listdir(dirPath)
        for filename in filenames:
            filePath = os.path.join(dirPath, filename)
            if filename.find(".config") >= 0 or filename.find("DS_Store") >= 0 or filename.find(
                    "_app.js") >= 0 or filename.find("_document.js") >= 0:
                continue
            else:
                extensionType = filename.split(".").pop()
                for extension in __AllowFileExtensionList:
                    if extension == extensionType:
                        print("[LOG] successful find file. ", filePath)
                        sourceFileList.append(filePath)
                        break
    print("[LOG] convert list. ", sourceFileList)


def optimizationRule(dirPath, scanStartIndex):
    """Rewrite <img .../> tags in dirPath as <picture> blocks with WebP sources.

    Mutates the file in place and recurses (restarting the scan just past the
    converted tag) until no further <img> tag is found at or after
    scanStartIndex.  NOTE(review): this function both normalizes the file
    layout and performs the conversion in the same pass; the exact statement
    order matters, so the code is kept byte-for-byte.
    """
    print("[scanStartIndex] ", scanStartIndex)
    isValid = False
    isConvert = False
    rowCounter = 0
    startRowIndex = -1
    endRowIndex = -1
    startIndex = -1
    endIndex = -1
    # Only operate on files whose extension is in the allow-list.
    for extension in __AllowFileExtensionList:
        if dirPath.find(f".{extension}") >= 0:
            isValid = True
            break
    if isValid:
        # 1. If other tags sit on the same line before/after an <img ... />
        #    tag, split them apart so the tag ends up on its own line
        #    (\n<img ... />\n), preserving the original indentation.
        sourceStore = []
        sourceFile = open(dirPath, "r+", encoding="UTF-8")
        sourceLines = sourceFile.readlines()
        for line in sourceLines:
            # [1] Read one line and split it on spaces.
            splitLine = line.split(" ")
            # Accumulates as many spaces as the original indentation.
            emptyLine = ""
            # One extra level of indentation.
            tabLine = " "
            # [2] Count how much leading whitespace precedes the <img tag.
            emptyCounter = 0
            for index, value in enumerate(splitLine):
                # Stop at the first token containing <img; the number of
                # tokens before it approximates the indentation width.
                if value.find("<img") >= 0:
                    if (index - 1) == -1:
                        emptyCounter = 0
                        break
                    else:
                        emptyCounter = index - 1
                        break
            # [3] Rebuild that much indentation so the rewritten line keeps
            #     the original layout.
            if line.find("<img") >= 0:
                for i in range(emptyCounter):
                    emptyLine += " "
            # [4] If <img appears at token index >= 1, something precedes it
            #     on the line, so prefix it with a newline plus indentation.
            for index, value in enumerate(splitLine):
                localIndex = value.find("<img")
                # Perform the \n<img replacement, then stop scanning tokens.
                if localIndex >= 1:
                    splitLine[index] = re.sub(r"\<img", "\n" + emptyLine + tabLine + "<img", value)
                    break
            # [5] Re-join the tokens into a single line.
            newStartSourceLine = " ".join(splitLine)
            newEndSourceLine = newStartSourceLine
            # [6] If /> is not at the very end of the line, another tag
            #     follows it, so append a newline after /> as well.
            if newStartSourceLine.rfind("/>") + 3 < newStartSourceLine.__len__():
                newEndSourceLine = re.sub(r"\/\>", "/>\n" + emptyLine, newStartSourceLine)
            # [7] Collect the processed line for the rewrite below.
            sourceStore = sourceStore + [newEndSourceLine]
        sourceFile.seek(0)
        sourceFile.writelines(sourceStore)
        sourceFile.truncate()
        sourceFile.close()

        # 2. Locate the row range (startRowIndex..endRowIndex) and the column
        #    range (startIndex..endIndex) of the next <img ... /> tag at or
        #    after scanStartIndex.
        sourceFile2 = open(dirPath, "r", encoding="UTF-8")
        sourceLines2 = sourceFile2.readlines()
        for line in sourceLines2:
            if rowCounter >= scanStartIndex:
                # [1] Find the column where <img starts.
                if startIndex == -1:
                    startIndex = line.find("<img")
                # [2] Not on this line: move on to the next one.
                if startIndex == -1:
                    rowCounter += 1
                    sourceStore = sourceStore + [line]
                    continue
                else:
                    # [3] Found: remember the row index.
                    startRowIndex = rowCounter
                    print("startRowIndex", startRowIndex)
                # [4] Find where the tag closes, either /> or >.
                if endIndex == -1:
                    endIndex = line.find("/>")
                    if endIndex == -1:
                        endIndex = line.find(">")
                # [5] Closing marker not on this line: keep scanning.
                if endIndex == -1:
                    rowCounter += 1
                    sourceStore = sourceStore + [line]
                    continue
                else:
                    # [6] Include the two characters of "/>" and remember
                    #     the closing row index.
                    endIndex += 2
                    endRowIndex = rowCounter
                    print("endRowIndex, ", endRowIndex)
            else:
                rowCounter += 1
        sourceFile2.close()

        # 3. Recursion exit check.
        if startRowIndex >= 0 and endRowIndex >= 0:
            print(f"startRowIndex : {startRowIndex} / endRowIndex : {endRowIndex}")
        else:
            # No further <img> tag found past scanStartIndex: the file is
            # fully processed, so unwind the recursion.
            return

        # Reaching here means all four indices were found, so perform the
        # actual <picture>/<source> rewrite for WebP cross-browser support.
        if startIndex != -1 and endIndex != -1 and startRowIndex != -1 and endRowIndex != -1:
            sourceStore = []
            sourceFile3 = open(dirPath, "r+", encoding="UTF-8")
            sourceLines3 = sourceFile3.readlines()
            for rowIndex, line in enumerate(sourceLines3):
                # (Custom) Skip GIFs: converting a GIF to WebP drops the
                # frame rate so the animation looks sluggish to the client.
                if startRowIndex <= rowIndex <= endRowIndex and line.find(".gif") == -1:
                    # Work out the original file extension.
                    extension = "jpg"
                    if line.find(".jpg") >= 0:
                        extension = "jpg"
                    elif line.find(".png") >= 0:
                        extension = "png"
                    elif line.find(".gif") >= 0:
                        extension = "gif"
                    emptyLine = ""
                    tabLine = " "
                    for i in range(startIndex):
                        emptyLine += " "
                    if not isConvert:
                        # Pull every attribute out of the <img ...> tag.
                        propertyList = []
                        for rowIndex2, line2 in enumerate(sourceLines3):
                            if startRowIndex <= rowIndex2 <= endRowIndex:
                                regLine2 = re.findall(r'''[(\<)]{1}img\b[a-zA-Z0-9\_\.\=\{\}\'\"\/\+\[\]\(\)\%\:\- ]*[(\/\>)|(\>)]{1}''', line2)
                                if regLine2.__len__() > 0:
                                    propertyList.append(re.findall(r'''[a-zA-Z\-]*\b\=[\{\"\'][a-zA-Z0-9_.+\'\"\/\[\]\(\)\%\:\- ]*[\}\"\']''', regLine2[0]))
                        print("propertyList, ", propertyList)
                        srcContent = ""
                        webpContent = ""
                        stringifyProperty = []
                        # Separate src= from the remaining attributes and
                        # derive the .webp variant of the source path.
                        for propertyContents in propertyList:
                            for propertyContent in propertyContents:
                                if propertyContent.find("src=") == -1:
                                    print("propertyContent, ", propertyContent)
                                    stringifyProperty.append(propertyContent)
                                elif propertyContent.find("src=") >= 0:
                                    srcContent = re.sub(r'''(src=)''', "", propertyContent)
                                    if re.findall(r'''(\.jpg)|(\.jpeg)|(\.JPG)|(\.JPEG)|(\.png)|(\.PNG)''', srcContent).__len__() > 0:
                                        webpContent = re.sub(r'''(\.jpg)|(\.jpeg)|(\.png)|(\.JPG)|(\.JPEG)|(\.png)|(\.PNG)''', ".webp", srcContent)
                                    else:
                                        webpContent = srcContent
                                    print("webpContent, ", webpContent)
                        print("srcContent", srcContent)
                        stringifyProperty = " ".join(stringifyProperty)
                        print("stringifyProperty, ", stringifyProperty)
                        # Emit the <picture> block: webp source, original
                        # format source, and a plain <img> fallback.
                        newSourceLine = f"\n{emptyLine}<picture>\n" \
                                        f"{emptyLine + tabLine}<source srcSet={webpContent} {stringifyProperty} type='image/webp' />\n" \
                                        f"{emptyLine + tabLine}<source srcSet={srcContent} {stringifyProperty} type='image/{extension}' />\n" \
                                        f"{emptyLine + tabLine}<img src={srcContent} {stringifyProperty} />\n" \
                                        f"{emptyLine}</picture>\n"
                        sourceStore = sourceStore + [newSourceLine]
                        isConvert = True
                    else:
                        # Remaining rows of the original tag are blanked out;
                        # the replacement block was already emitted above.
                        sourceStore = sourceStore + [""]
                else:
                    sourceStore = sourceStore + [line]
            sourceFile3.seek(0)
            sourceFile3.writelines(sourceStore)
            sourceFile3.truncate()
            sourceFile3.close()
            print(f"dirPath : {dirPath}, {scanStartIndex}")
            # Recurse to handle the next <img> tag; skip past the five lines
            # just inserted when a conversion happened.
            if isConvert:
                optimizationRule(dirPath, startRowIndex + 5)
            else:
                optimizationRule(dirPath, startRowIndex + 1)
            return
    else:
        print(f"[WARN] {dirPath} is not file. passed optimization.")
        return


def runOptimization():
    """Run optimizationRule over every collected source file."""
    for dirPath in sourceFileList:
        print(f"[{dirPath}]")
        optimizationRule(dirPath, 0)
    return


scanDirectory(path)
directoryPathList.append(path)
scanSourceFiles()
runOptimization()
997,654
86896de4828822a7b6fe226ec1e98e7b4b15a9e6
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 22 16:17:01 2018

@author: bmpatel

Read five integers from stdin and print their arithmetic mean.
"""

COUNT = 5  # how many numbers to read; previously hard-coded in two places

total = 0  # renamed from `sum`, which shadowed the builtin
for _ in range(COUNT):
    ino = int(input("Enter Number="))
    total = total + ino

avg = total / COUNT
print(avg)
997,655
141e856043e725189007cc990a939a9362457b69
"""Authentication blueprint package initializer."""
from flask import Blueprint
from . import forms

# Blueprint object imported by the sibling modules of this package and
# registered on the application elsewhere.
auth = Blueprint('auth',__name__)

# NOTE(review): imported at the bottom, after `auth` exists — presumably
# because views.py imports `auth` from this module, so moving this import to
# the top would create a circular import.  Confirm before reordering.
from . import views
997,656
8458126a0b2d428a65f981f8f5dad097bd44527f
"""Panda package management for Windows.

DC/OS installation state descriptor type definition.
"""
import json

from pathlib import Path

from common import logger
from common import constants as cm_const
from common.storage import ISTOR_NODE, IStorNodes
from core import exceptions as cr_exc
from core import utils as cr_utl


LOG = logger.get_logger(__name__)


class ISTATE:
    """DC/OS installation states codes."""
    UNDEFINED = 'UNDEFINED'
    INSTALLATION_IN_PROGRESS = 'INSTALLATION_IN_PROGRESS'
    INSTALLATION_FAILED = 'INSTALLATION_FAILED'
    INSTALLED = 'INSTALLED'
    UPGRADE_IN_PROGRESS = 'UPGRADE_IN_PROGRESS'
    UPGRADE_FAILED = 'UPGRADE_FAILED'


# Every state code declared on ISTATE (all public, non-dunder attributes).
VALID_ISTATES = [
    getattr(ISTATE, i) for i in ISTATE.__dict__ if not i.startswith('__')
]


class InstallationState:
    """DC/OS installation state descriptor.

    Tracks the installation state code, the storage layout and the state
    directory, and persists itself as a JSON file in that directory.
    """
    def __init__(self, istor_nodes: IStorNodes = None,
                 istate: str = ISTATE.UNDEFINED,
                 istate_dpath: Path = None, save: bool = True):
        """Constructor.

        :param istor_nodes:  IStorNodes, DC/OS installation storage nodes (set
                             of pathlib.Path objects)
        :param istate:       str, DC/OS installation state code
        :param istate_dpath: Path, absolute path to the DC/OS state directory
                             within the local DC/OS installation storage
        :param save:         bool, save DC/OS installation state descriptor to
                             file, if True

        Raises AssertionError on invalid arguments (assert-based validation is
        kept deliberately: load() catches AssertionError by contract).
        """
        self.msg_src = self.__class__.__name__

        if istor_nodes is not None:
            # The state directory is derived from the storage layout.
            istate_dpath = getattr(istor_nodes, ISTOR_NODE.STATE)
        elif istate_dpath is None:
            # Fixed: the two f-string fragments previously concatenated into
            # "...istate_dpathmust be specified" (missing space).
            assert False, (
                f'{self.msg_src}: Argument: Either istor_nodes or istate_dpath'
                f' must be specified'
            )

        assert istate in VALID_ISTATES, (
            f'{self.msg_src}: Argument: istate: Invalid value: {istate}'
        )

        self._istor_nodes = istor_nodes
        self._istate = istate
        self._istate_dpath = istate_dpath

        if save is True:
            self.save()

    def __str__(self):
        return str(self.body)

    def __eq__(self, other):
        if not isinstance(other, InstallationState):
            return False

        return (self._istor_nodes == other._istor_nodes and
                self._istate == other._istate and
                self._istate_dpath == other._istate_dpath)

    @property
    def body(self):
        """Construct JSON-compatible dict representation of DC/OS
        installation state descriptor.
        """
        if self._istor_nodes is None:
            istor_nodes = self._istor_nodes
        else:
            # Stringify paths so the dict is JSON-serializable.
            istor_nodes = {
                k: str(v) for k, v in self._istor_nodes._asdict().items()
            }

        return {
            'istor_nodes': istor_nodes,
            'istate': self._istate,
            'istate_dpath': str(self._istate_dpath),
        }

    @classmethod
    def load(cls, fpath: Path):
        """Load DC/OS installation state descriptor from a file.

        :param fpath: Path, path to a JSON-formatted descriptor file.
        :return:      InstallationState, DC/OS installation state descriptor
                      object.
        :raises:      cr_exc.RCInvalidError on malformed descriptor content.
        """
        isd_body = cr_utl.rc_load_json(fpath, emheading=cls.__name__)
        # TODO: Add content verification (jsonschema) for m_body. Raise
        #       ValueError, if conformance was not confirmed.
        try:
            istor_nodes = IStorNodes(**{
                k: Path(v) for k, v in isd_body.get('istor_nodes').items()
            }) if isinstance(isd_body.get('istor_nodes'), dict) else None

            # save=False: don't rewrite the file we just loaded from.
            istate_desc = cls(
                istor_nodes=istor_nodes,
                istate=isd_body.get('istate'),
                istate_dpath=Path(isd_body.get('istate_dpath')),
                save=False,
            )
            LOG.debug(f'{cls.__name__}: Load: {fpath}')
        except (ValueError, AssertionError, TypeError) as e:
            err_msg = (f'{cls.__name__}: Load:'
                       f' {fpath}: {type(e).__name__}: {e}')
            raise cr_exc.RCInvalidError(err_msg) from e

        return istate_desc

    def save(self):
        """Save DC/OS installation state descriptor to a file within the
        installation's state directory.

        :raises: cr_exc.RCError on filesystem failure.
        """
        fpath = self._istate_dpath.joinpath(cm_const.DCOS_INST_STATE_FNAME_DFT)

        try:
            self._istate_dpath.mkdir(parents=True, exist_ok=True)
            with fpath.open(mode='w') as fp:
                json.dump(self.body, fp)
        except (OSError, RuntimeError) as e:
            err_msg = f'{self.msg_src}: Save: {type(e).__name__}: {e}'
            raise cr_exc.RCError(err_msg) from e

        LOG.debug(f'{self.msg_src}: Save: {fpath}')

    @property
    def istor_nodes(self):
        """DC/OS installation storage layout (IStorNodes or None)."""
        return self._istor_nodes

    @istor_nodes.setter
    def istor_nodes(self, istor_nodes: IStorNodes):
        """Set DC/OS installation storage layout part of the DC/OS
        installation state descriptor.

        :param istor_nodes: IStorNodes, DC/OS installation storage nodes (set
                            of pathlib.Path objects)
        :raises RuntimeError: if the layout was already set (write-once).
        :raises ValueError:   if the layout's state directory differs from
                              the one this descriptor was created with.
        """
        err_msg_base = f'{self.msg_src}: Set storage layout'

        if self._istor_nodes is not None:
            raise RuntimeError(f'{err_msg_base}: Already set')
        elif getattr(istor_nodes, ISTOR_NODE.STATE) != self._istate_dpath:
            raise ValueError(f'{err_msg_base}: Installation'
                             f' state directory mismatch: {istor_nodes}')

        self._istor_nodes = istor_nodes
        # Roll back on persistence failure so memory and disk stay in sync.
        try:
            self.save()
        except cr_exc.RCError:
            self._istor_nodes = None
            raise

    @property
    def istate(self):
        """Current DC/OS installation state code (str)."""
        return self._istate

    @istate.setter
    def istate(self, istate: str):
        """Set DC/OS installation state code.

        :param istate: str, DC/OS installation state code
        :raises ValueError: if istate is not a member of VALID_ISTATES.
        """
        err_msg = f'{self.msg_src}: Set state: {istate}'

        if istate not in VALID_ISTATES:
            raise ValueError(err_msg)

        istate_former = self._istate
        self._istate = istate
        # Roll back on persistence failure so memory and disk stay in sync.
        try:
            self.save()
        except cr_exc.RCError:
            self._istate = istate_former
            raise

    @property
    def istate_dpath(self):
        """Absolute path to the DC/OS state directory (Path)."""
        return self._istate_dpath
997,657
853563b1dda5099cc893d715737863348d62c778
#!/usr/bin/env python
"""
_Step.Executor.StageOut_

Implementation of an Executor for a StageOut step
"""
from __future__ import print_function

import logging
import os
import os.path
import signal
import sys

from WMCore.Algorithms.Alarm import Alarm, alarmHandler
from WMCore.FwkJobReport.Report import Report
from WMCore.Lexicon import lfn as lfnRegEx
from WMCore.Lexicon import userLfn as userLfnRegEx
from WMCore.Storage.FileManager import StageOutMgr as FMStageOutMgr
from WMCore.Storage.StageOutMgr import StageOutMgr
from WMCore.WMSpec.Steps.Executor import Executor


class StageOut(Executor):
    """
    _StageOut_

    Execute a StageOut Step
    """

    def pre(self, emulator=None):
        """
        _pre_

        Pre execution checks
        """
        # Are we using an emulator?
        if emulator is not None:
            return emulator.emulatePre(self.step)
        logging.info("Steps.Executors.%s.pre called", self.__class__.__name__)
        return None

    def execute(self, emulator=None):
        """
        _execute_

        Stage out every output file of the previously executed steps,
        diverting unmerged files straight to merge when size/event
        thresholds allow.
        """
        # Are we using emulators again?
        if emulator is not None:
            return emulator.emulate(self.step, self.job)
        logging.info("Steps.Executors.%s.execute called", self.__class__.__name__)

        overrides = {}
        if hasattr(self.step, 'override'):
            overrides = self.step.override.dictionary_()

        # propagete upstream cmsRun outcome such that we can decide whether to
        # stage files out or not
        self.failedPreviousStep = overrides.get('previousCmsRunFailure', False)

        # Set wait to two hours per retry
        # this alarm leaves a subprocess behing that may cause trouble, see #6273
        waitTime = overrides.get('waitTime', 7200 * self.step.retryCount)

        logging.info("StageOut override is: %s ", self.step)

        # Pull out StageOutMgr Overrides
        # switch between old stageOut behavior and new, fancy stage out behavior
        useNewStageOutCode = False
        if getattr(self.step, 'newStageout', False) or \
                ('newStageOut' in overrides and overrides.get('newStageOut')):
            useNewStageOutCode = True

        stageOutCall = {}
        if "command" in overrides and "option" in overrides \
                and "phedex-node" in overrides \
                and "lfn-prefix" in overrides:
            logging.critical('using override in StageOut')
            stageOutCall['command'] = overrides.get('command')
            stageOutCall['option'] = overrides.get('option')
            stageOutCall['phedex-node'] = overrides.get('phedex-node')
            stageOutCall['lfn-prefix'] = overrides.get('lfn-prefix')

        # naw man, this is real
        # iterate over all the incoming files
        if not useNewStageOutCode:
            # old style
            manager = StageOutMgr(**stageOutCall)
            manager.numberOfRetries = self.step.retryCount
            manager.retryPauseTime = self.step.retryDelay
        else:
            # new style
            logging.critical("STAGEOUT IS USING NEW STAGEOUT CODE")
            manager = FMStageOutMgr(retryPauseTime=self.step.retryDelay,
                                    numberOfRetries=self.step.retryCount,
                                    **stageOutCall)

        # We need to find a list of steps in our task
        # And eventually a list of jobReports for out steps

        # Search through steps for report files
        filesTransferred = []

        for step in self.stepSpace.taskSpace.stepSpaces():
            if step == self.stepName:
                # Don't try to parse your own report; it's not there yet
                continue

            stepLocation = os.path.join(self.stepSpace.taskSpace.location, step)
            logging.info("Beginning report processing for step %s", step)
            reportLocation = os.path.join(stepLocation, 'Report.pkl')
            if not os.path.isfile(reportLocation):
                logging.error("Cannot find report for step %s in space %s", step, stepLocation)
                continue

            # First, get everything from a file and 'unpersist' it
            stepReport = Report()
            stepReport.unpersist(reportLocation, step)

            # Don't stage out files from bad steps. Each step has its own Report.pkl file
            # We need to check all steps executed so far, otherwise it might stage out
            # files for chained steps when the overall job has already failed to process
            # one of them
            if not stepReport.stepSuccessful(step) or self.failedPreviousStep:
                msg = "Either the step did not succeed or an upstream step failed. "
                msg += "Skipping stage out of any root output files in this job."
                logging.warning(msg)
                continue

            # Okay, time to start using stuff
            # Now I'm a bit confused about this; each report should ONLY
            # Have the results of that particular step in it,
            # So getting all the files should get ONLY the files
            # for that step; or so I hope
            files = stepReport.getAllFileRefsFromStep(step=step)
            for fileName in files:
                # make sure the file information is consistent
                if hasattr(fileName, 'pfn') and (not hasattr(fileName, 'lfn') or not hasattr(fileName, 'module_label')):
                    msg = "Not a valid file: %s" % fileName
                    logging.error(msg)
                    continue

                # Figuring out if we should do straight to merge
                # - should we do straight to merge at all ?
                # - is straight to merge disabled for this output ?
                # - are we over the size threshold
                # - are we over the event threshold ?
                straightToMerge = False
                if not getattr(fileName, 'merged', False):
                    if hasattr(fileName, 'dataset') and fileName.dataset.get('dataTier', "") in ["NANOAOD", "NANOAODSIM"]:
                        logging.info("NANOAOD and NANOAODSIM files never go straight to merge!")
                    elif fileName.module_label not in getattr(self.step.output, 'forceUnmergedOutputs', []):
                        if hasattr(self.step.output, 'minMergeSize') and getattr(fileName, 'size', 0) >= self.step.output.minMergeSize:
                            logging.info("Sending %s straight to merge due to minMergeSize", fileName.lfn)
                            straightToMerge = True
                        elif getattr(fileName, 'events', 0) >= getattr(self.step.output, 'maxMergeEvents', sys.maxsize):
                            logging.info("Sending %s straight to merge due to maxMergeEvents", fileName.lfn)
                            straightToMerge = True

                if straightToMerge:
                    try:
                        fileName = self.handleLFNForMerge(mergefile=fileName, step=step)
                    except Exception as ex:
                        logging.info("minMergeSize: %s", getattr(self.step.output, 'minMergeSize', None))
                        logging.info("maxMergeEvents: %s", getattr(self.step.output, 'maxMergeEvents', None))
                        logging.error("Encountered error while handling LFN for merge %s", fileName)
                        logging.error(str(ex))
                        manager.cleanSuccessfulStageOuts()
                        stepReport.addError(self.stepName, 60401,
                                            "DirectToMergeFailure", str(ex))

                # Save the input PFN in case we need it
                # Undecided whether to move fileName.pfn to the output PFN
                fileName.InputPFN = fileName.pfn
                lfn = fileName.lfn
                fileSource = getattr(fileName, 'Source', None)
                # User-produced files follow a different LFN convention.
                if fileSource in ['TFileService', 'UserDefined']:
                    userLfnRegEx(lfn)
                else:
                    lfnRegEx(lfn)

                fileForTransfer = {'LFN': lfn,
                                   'PFN': getattr(fileName, 'pfn'),
                                   'PNN': None,
                                   'StageOutCommand': None,
                                   'Checksums': getattr(fileName, 'checksums', None)}
                # Guard the transfer with an alarm so a hung stage out
                # cannot block the job forever.
                signal.signal(signal.SIGALRM, alarmHandler)
                signal.alarm(waitTime)
                try:
                    manager(fileForTransfer)
                    # Afterwards, the file should have updated info.
                    filesTransferred.append(fileForTransfer)
                    fileName.StageOutCommand = fileForTransfer['StageOutCommand']
                    fileName.location = fileForTransfer['PNN']
                    fileName.OutputPFN = fileForTransfer['PFN']
                except Alarm:
                    msg = "Indefinite hang during stageOut of logArchive"
                    logging.error(msg)
                    manager.cleanSuccessfulStageOuts()
                    stepReport.addError(self.stepName, 60403,
                                        "StageOutTimeout", msg)
                    # well, if it fails for one file, it fails for the whole job...
                    break
                except Exception as ex:
                    manager.cleanSuccessfulStageOuts()
                    stepReport.addError(self.stepName, 60307,
                                        "StageOutFailure", str(ex))
                    stepReport.persist(reportLocation)
                    raise

                signal.alarm(0)

            # Am DONE with report. Persist it
            stepReport.persist(reportLocation)

        # Done with all steps, and should have a list of
        # stagedOut files in fileForTransfer
        logging.info("Transferred %i files", len(filesTransferred))
        return

    def post(self, emulator=None):
        """
        _post_

        Post execution checkpointing: attach async-stageout user metadata
        to every fully-described output file in each step report.
        """
        # Another emulator check
        if emulator is not None:
            return emulator.emulatePost(self.step)

        logging.info("Steps.Executors.%s.post called", self.__class__.__name__)

        for step in self.stepSpace.taskSpace.stepSpaces():
            if step == self.stepName:
                # Don't try to parse your own report; it's not there yet
                continue

            stepLocation = os.path.join(self.stepSpace.taskSpace.location, step)
            logging.info("Beginning report processing for step %s", step)
            reportLocation = os.path.join(stepLocation, 'Report.pkl')
            if not os.path.isfile(reportLocation):
                logging.error("Cannot find report for step %s in space %s", step, stepLocation)
                continue

            # First, get everything from a file and 'unpersist' it
            stepReport = Report(step)
            stepReport.unpersist(reportLocation)

            # Don't stage out files from bad steps.
            if not stepReport.stepSuccessful(step):
                continue

            files = stepReport.getAllFileRefsFromStep(step=step)
            for fileInfo in files:
                if hasattr(fileInfo, 'lfn') and hasattr(fileInfo, 'location') and hasattr(fileInfo, 'guid'):
                    fileInfo.user_dn = getattr(self.step, "userDN", None)
                    fileInfo.async_dest = getattr(self.step, "asyncDest", None)
                    fileInfo.user_vogroup = getattr(self.step, "owner_vogroup", '')
                    fileInfo.user_vorole = getattr(self.step, "owner_vorole", '')

            stepReport.persist(reportLocation)

        return None

    # Accessory methods
    def handleLFNForMerge(self, mergefile, step):
        """
        _handleLFNForMerge_

        Digs up unmerged LFN out of WMStep outputModule and changes the
        current file to match.

        Requires a mergedLFNBase in the WMSpec output module
        """
        # First get the output module
        # Do this by finding the name in the step report
        # And then finding that module in the WMStep Helper
        outputName = getattr(mergefile, 'module_label', None)
        if not outputName:
            logging.error("Attempt to merge directly failed due to " \
                          + "No module_label in file.")
            # Fixed: previously execution fell through to outputName.lower()
            # below and crashed with AttributeError on None; leave the file
            # unmerged instead.
            return mergefile

        if outputName.lower() == "merged":
            # Don't skip merge for merged files!
            return mergefile

        stepHelper = self.task.getStep(stepName=step)
        outputMod = stepHelper.getOutputModule(moduleName=outputName)

        if not outputMod:
            # Then we couldn't get the output module
            logging.error("Attempt to directly merge failed " \
                          + "due to no output module %s in WMStep" \
                          % (outputName))
            return mergefile

        # Okay, now we should have the output Module
        # Now we just need a second LFN
        newBase = getattr(outputMod, 'mergedLFNBase', None)
        oldBase = getattr(outputMod, 'lfnBase', None)

        if not newBase:
            # Then we don't have a base to change it to
            logging.error("Direct to Merge failed due to no mergedLFNBase in %s", outputName)
            return mergefile

        # Replace the actual LFN base
        oldLFN = getattr(mergefile, 'lfn')
        newLFN = oldLFN.replace(oldBase, newBase)

        # Set the file attributes
        setattr(mergefile, 'lfn', newLFN)
        setattr(mergefile, 'merged', True)

        # Return the file
        return mergefile
997,658
d2a54510389f90498da93dc355339e2d168e068e
import os import re import nltk import numpy as np from numpy.random import seed seed(1) import sys, gensim, logging, codecs, gzip from nltk.corpus import stopwords from nltk.tokenize import word_tokenize as wt from nltk.tokenize import sent_tokenize as st from gensim.models import Word2Vec from numpy import argmax from collections import Counter from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import OneHotEncoder import logging from os.path import exists import pickle stopwords = stopwords.words('english') logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO) logger = logging.getLogger() logger.setLevel(logging.DEBUG) PATH = "C:\\Users\\Bhushan Borole\\Desktop\\Coding\\Projects\\Text-Summarization\\dataset\\cnn" gensim_model_name = 'w2v' class LSTM(): def __init__(self, training_data): self.training_data = training_data self.batch_size = 32 self.epochs = 20 self.hidden_units = 300 self.learning_rate = 0.005 self.clip_norm = 2.0 self.encoder_shape = np.shape(training_data['article'][0]) self.decoder_shape = np.shape(training_data['summaries'][0]) def encode_decoder(self, data): print('Encoder_Decoder LSTM...') """__encoder___""" encoder_inputs = Input(shape=self.encoder_shape) encoder_LSTM = LSTM(self.hidden_units,dropout_U=0.2, dropout_W=0.2, return_sequences=True, return_state=True) encoder_LSTM_rev = LSTM(self.hidden_units,return_state=True, return_sequences=True, dropout_U=0.05, dropout_W=0.05, go_backwards=True) encoder_outputs, state_h, state_c = encoder_LSTM(encoder_inputs) encoder_outputsR, state_hR, state_cR = encoder_LSTM_rev(encoder_inputs) state_hfinal=Add()([state_h,state_hR]) state_cfinal=Add()([state_c,state_cR]) encoder_outputs_final = Add()([encoder_outputs,encoder_outputsR]) encoder_states = [state_hfinal,state_cfinal] """____decoder___""" decoder_inputs = Input(shape=(None,self.decoder_shape[1])) decoder_LSTM = LSTM(self.hidden_units, return_sequences=True, dropout_U=0.2, 
dropout_W=0.2, return_state=True) decoder_outputs, _, _ = decoder_LSTM(self.decoder_inputs, initial_state=encoder_states) #Pull out XGBoost, (I mean attention) attention = TimeDistributed(Dense(1, activation = 'tanh'))(encoder_outputs_final) attention = Flatten()(attention) attention = Multiply()([decoder_outputs, attention]) attention = Activation('softmax')(attention) attention = Permute([2, 1])(attention) decoder_dense = Dense(self.decoder_shape[1],activation='softmax') decoder_outputs = decoder_dense(attention) model= Model(inputs=[encoder_inputs,decoder_inputs], outputs=decoder_outputs) print('-------------Model Summary------------') print(model.summary()) print('-'*40) rmsprop = RMSprop(lr=self.learning_rate, clipnorm=self.clip_norm) model.compile(loss='categorical_crossentropy', optimizer=rmsprop, metrics=['accuracy']) x_train, x_test, y_train, y_test = tts(data["article"],data["summaries"], test_size=0.20) history= model.fit(x=[x_train,y_train], y=y_train, batch_size=self.batch_size, epochs=self.epochs, verbose=1, validation_data=([x_test,y_test], y_test)) print('-------------Model Summary------------') print(model.summary()) print('-'*40) """_________________inference mode__________________""" encoder_model_inf = Model(encoder_inputs,encoder_states) decoder_state_input_H = Input(shape=(self.encoder_shape[0],)) decoder_state_input_C = Input(shape=(self.encoder_shape[0],)) decoder_state_inputs = [decoder_state_input_H, decoder_state_input_C] decoder_outputs, decoder_state_h, decoder_state_c = decoder_LSTM(decoder_inputs, initial_state=decoder_state_inputs) decoder_states = [decoder_state_h, decoder_state_c] decoder_outputs = decoder_dense(decoder_outputs) decoder_model_inf= Model([decoder_inputs]+decoder_state_inputs, [decoder_outputs]+decoder_states) scores = model.evaluate([x_test,y_test],y_test, verbose=1) print('LSTM test scores:', scores) print('-------------Model Summary------------') print(model.summary()) print('-'*40) return 
model,encoder_model_inf,decoder_model_inf,history class Word2vec(): def __init__(self, data): self.data = data pass def create_corpus(self): corpus = [] all_sentences = [] for k in self.data: for p in self.data[k]: corpus.append(st(p)) for sent in range(len(corpus)): for k in corpus[sent]: all_sentences.append(k) for m in range(len(all_sentences)): all_sentences[m] = wt(all_sentences[m]) all_words=[] for sent in all_sentences: hold=[] for word in sent: hold.append(word.lower()) all_words.append(hold) return all_words def save_gensim_model(self, model): with open(gensim_model_name, 'wb') as model_file: pickle.dump(model, model_file) def update_model(self, model, corpus): logger.info('Updating model') model.train(corpus, total_examples=len(corpus), epochs=25) self.save_gensim_model(model) return model def word2vec_model(self, corpus): emb_size = 300 model_type = {"skip_gram":1,"CBOW":0} window = 10 workers = 6 min_count = 1 batch_words = 20 epochs = 25 #include bigrams #bigramer = gs.models.Phrases(corpus) model = Word2Vec(corpus,size=emb_size,sg=model_type["skip_gram"], compute_loss=True,window=window,min_count=min_count,workers=workers, batch_words=batch_words) self.save_gensim_model(model) model.train(corpus,total_examples=len(corpus),epochs=epochs) logger.info('Model deployed') return model ''' def update(self, model, corpus, mincount=3): """ Add new words from new data to the existing model's vocabulary, generate for them random vectors in syn0 matrix. For existing words, increase their counts by their frequency in the new data. Generate new negative sampling matrix (syn1neg). Then, train the existing model with the new data. 
""" added_count = 0 logging.info("Extracting vocabulary from new data...") newmodel = gensim.models.Word2Vec(min_count=mincount, sample=0, hs=0) newmodel.build_vocab(corpus) logging.info("Merging vocabulary from new data...") sampleint = model.wv.vocab[model.index2word[0]].sample_int words = 0 newvectors = [] newwords = [] for word in newmodel.vocab: words += 1 if word not in model.vocab: v = gensim.models.word2vec.Vocab() v.index = len(model.vocab) model.wv.vocab[word] = v model.wv.vocab[word].count = newmodel.vocab[word].count model.wv.vocab[word].sample_int = sampleint model.index2word.append(word) random_vector = model.seeded_vector(model.index2word[v.index] + str(model.seed)) newvectors.append(random_vector) added_count += 1 newwords.append(word) else: model.vocab[word].count += newmodel.vocab[word].count if words % 1000 == 0: logging.info("Words processed: %s" % words) logging.info("added %d words into model from new data" % (added_count)) logging.info("Adding new vectors...") alist = [row for row in model.syn0] for el in newvectors: alist.append(el) model.syn0 = array(alist) logging.info("Generating negative sampling matrix...") model.syn1neg = zeros((len(model.vocab), model.layer1_size), dtype=REAL) model.make_cum_table() model.neg_labels = zeros(model.negative + 1) model.neg_labels[0] = 1. 
model.syn0_lockf = ones(len(model.vocab), dtype=REAL) logging.info("Training with new data...") model.train(corpus, total_examples=len(corpus)) return model ''' def encode(self, corpus): all_words = [] one_hot = {} for sent in corpus: for word in wt(' '.join(sent)): all_words.append(word.lower()) #print(len(set(all_words)), "unique words in corpus") logger.info(str(len(all_words)) + 'unique words in corpus') #maxcorp=int(input("Enter desired number of vocabulary: ")) maxcorp = int(len(set(all_words)) / 1.1) wordcount = Counter(all_words).most_common(maxcorp) all_words = [] for p in wordcount: all_words.append(p[0]) all_words = list(set(all_words)) #print(len(all_words), "unique words in corpus after max corpus cut") #logger.info(str(len(all_words)) + 'unique words in corpus after max corpus cut') #integer encode #label_encoder = LabelEncoder() #integer_encoded = label_encoder.fit_transform(all_words) #one hot label_encoder = LabelEncoder() integer_encoded = label_encoder.fit_transform(all_words) onehot_encoder = OneHotEncoder(sparse=False) #integer_encoded = integer_encoded.reshape(len(integer_encoded), 1) onehot_encoded = onehot_encoder.fit_transform(np.array(all_words).reshape(-1, 1)) for i in range(len(onehot_encoded)): word = label_encoder.inverse_transform([argmax(onehot_encoded[i, :])])[0].strip() one_hot[word] = onehot_encoded[i] #print(len(one_hot.keys())) return one_hot def word_vec_matrix(self, model, one_hot): training_data = {"article":[],"summaries":[]} i=1 for k in range(len(self.data["articles"])): art=[] summ=[] for word in wt(self.data["articles"][k].lower()): try: art.append(model.wv.word_vec(word)) except Exception as e: print(e) for word in wt(self.data["summaries"][k].lower()): try: summ.append(one_hot[word]) #summ.append(model.wv.word_vec(word)) except Exception as e: print(e) training_data["article"].append(art) training_data["summaries"].append(summ) if i%100==0: logger.info("progress: " + str(((i*100)/len(self.data["articles"])))) i+=1 
print('\007') return training_data class LoadDataset(): def __init__(self, path): self.path = path self.dataset_categories = ['training', 'validation', 'test'] self.data = { 'articles': [], 'summaries': [] } def parse_text(self, dir, category, filename): with open(dir+'\\'+category+'\\'+filename, 'r', encoding="Latin-1") as f: #print("{}: {} read successfully".format(category, filename)) text = f.read() text = self.clean_text(text) return text def clean_text(self, text): text = re.sub(r'\[[0-9]*\]', ' ', text) text = re.sub('[^a-zA-Z]', ' ', text) text = re.sub(r'\s+', ' ', text) text = re.sub(r"what's","what is ",text) text = re.sub(r"it's","it is ",text) text = re.sub(r"\'ve"," have ",text) text = re.sub(r"i'm","i am ",text) text = re.sub(r"\'re"," are ",text) text = re.sub(r"n't"," not ",text) text = re.sub(r"\'d"," would ",text) text = re.sub(r"\'s","s",text) text = re.sub(r"\'ll"," will ",text) text = re.sub(r"can't"," cannot ",text) text = re.sub(r" e g "," eg ",text) text = re.sub(r"e-mail","email",text) text = re.sub(r"9\\/11"," 911 ",text) text = re.sub(r" u.s"," american ",text) text = re.sub(r" u.n"," united nations ",text) text = re.sub(r"\n"," ",text) text = re.sub(r":"," ",text) text = re.sub(r"-"," ",text) text = re.sub(r"\_"," ",text) text = re.sub(r"\d+"," ",text) text = re.sub(r"[$#@%&,\"'*!~?%{}()]"," ",text) return text def get_file_names(self, dir, category): files = os.listdir(dir + '\\' + category) print(len(files)) return files def printArticlesum(self, k): print("---------------------original sentence-----------------------") print("-------------------------------------------------------------") print(self.data["articles"][k]) print("----------------------Summary sentence-----------------------") print("-------------------------------------------------------------") print(self.data["summaries"][k]) def load_dataset(self): file_names = self.get_file_names(self.path, self.dataset_categories[0]) for i in range(len(file_names[:1000])): if i%2 
== 0: self.data['articles'].append(self.parse_text(self.path, self.dataset_categories[0], file_names[i])) else: self.data['summaries'].append(self.parse_text(self.path, self.dataset_categories[0], file_names[i])) #self.printArticlesum(1) logger.info('data loaded from {} to {}'.format(x, y)) logger.info('Length of data: {}'.format(len(self.data['articles']) + len(self.data['articles']))) return self.data if __name__ == '__main__': data = LoadDataset(PATH).load_dataset() w2v = Word2vec(data) corpus = w2v.create_corpus() one_hot = w2v.encode(corpus) if exists(gensim_model_name): with open(gensim_model_name, 'rb') as model_file: model = pickle.load(model_file) else: model = w2v.word2vec_model(corpus) # model = w2v.update(model, corpus) training_data = w2v.word_vec_matrix(model, one_hot) #print(model['quick']) print('The vocabulary size of the model is: ', len(model.wv.vocab)) print("summary length: ",len(training_data["summaries"][0])) print("article length: ",len(training_data["article"][0]))
997,659
c66344dd52921124076283c662b0554b2290b7e0
# -*- coding: utf-8 -*- '2019-06-18 Created by zhulk' import unittest import os from config.url import caseDir from HTMLTestRunner_cn import HTMLTestRunner from public.common import NOW from config.url import reportDir from email.mime.text import MIMEText from email.header import Header from email.mime.multipart import MIMEMultipart import time import smtplib #创建测试用例 def createTestsuite(): #改变路径到测试路径到testcase并筛选 os.chdir(caseDir) suite = [] test_unit = unittest.TestSuite() discover_suites = unittest.TestLoader.discover(os.getcwd(), pattern='*.py') for testsuite in discover_suites: for testcase in testsuite: test_unit.addTest(testcase) suite.append(test_unit) return suite #执行测试用例并生成报告 def excuteTestcase(suite): os.chdir(reportDir) filename = os.getcwd()+'\\'+ NOW() + ".html" fp = open(filename,'wb') for testcase in suite: runner = HTMLTestRunner( title='测试报告', description='E7商旅测试', stream=fp, verbosity=2,retry=0,save_last_try=True ) runner.run(testcase) #获取最新的报告 def newReport(): lists = os.listdir(reportDir) lists.sort(key=lambda fn: os.path.getmtime(reportDir + '\\' + fn)) # 获取最新文件的绝对路径 newpath = os.path.join(reportDir, lists[-1]) return newpath #发送邮件 def sendEmail(filename): mailfrom = 'zlk_0102@163.com' mailto = '517122472@qq.com' f = open(filename, 'rb') mailcontent = f.read() f.close() msg = MIMEMultipart() msg.attach(MIMEText(mailcontent, _subtype='html', _charset='utf-8')) att = MIMEText(mailcontent, 'base64', 'utf-8') att["Content-Type"] = 'application/octet-stream' att["Content-Disposition"] = 'attachment; filename=' + filename msg.attach(att) msg['Subject'] = Header(u'商旅测试自动化测试报告', 'utf-8') msg['date'] = time.strftime('%a, %d %b %Y %H:%M:%S %z') msg['From'] = mailfrom msg['to'] = mailto username = 'zlk_0102@163.com' password = 'nicaiya01' smtp = smtplib.SMTP() smtp.connect('smtp.163.com') smtp.login(username, password) smtp.sendmail(mailfrom, mailto, msg.as_string()) smtp.quit() if __name__ == '__main__': excuteTestcase(createTestsuite()) 
sendEmail(newReport())
997,660
6df1c0b2654f60627eed6fa4b961118098f42180
# -*- coding: utf-8 -*- """ @author: Wei, Shuowen https://leetcode.com/problems/squares-of-a-sorted-array/ """ class Solution(object): def sortedSquares(self, nums): """ :type nums: List[int] :rtype: List[int] """ """ if nums[0] >= 0: # [0, 1, 2, 3] return [n**2 for n in nums] if nums[-1] <= 0: # [-3, -2, -1, 0] return [n**2 for n in nums[::-1]] for i in range(len(nums)): if nums[i] > 0: # nums[i] is positive negative_nums = nums[:i] positive_nums = nums[i:] # nums[i] is in the positive_nums break new_nums = [] while negative_nums and positive_nums: if abs(negative_nums[-1]) > positive_nums[0]: new_nums.append(positive_nums.pop(0)) else: new_nums.append(negative_nums.pop()) if positive_nums: new_nums += positive_nums if negative_nums: new_nums += [abs(n) for n in negative_nums[::-1]] return [n**2 for n in new_nums] """ # solution 2, much slower return sorted([n**2 for n in nums])
997,661
fc5603cbdd4d1a9e6646bc6515fdb5763b8b9cf7
import pygame, sys from pygame.locals import * import math import random import Queue # Define some colors BLACK = (0, 0, 0) WHITE = (255, 255, 255) GREEN = (0, 255, 0) MAGENTA = (255, 0, 255) RED = (255, 0, 0) BLUE = (0, 0, 255) YELLOW = (255, 255, 0) DARKGRAY = (64, 64, 64) GRAY = (128, 128, 128) def asColour(f): ''' flaot 0-1.0 to 8 bit gray scale colour. ''' return (int(f*255), int(f*255), int(f*255)) def asValue(colour): ''' 8 bit grayscale colour to value 0-1.0 ''' return float(colour[0]/255.0) def degreesToRadians(deg): ''' Derr! ''' return deg/180.0 * math.pi class OccupancyMap(): ''' 2d map of cells containing probablity of occupation (0.0-1.0) at that position. ''' def __init__(self, size, _map=None): # scale = size of cells in cm. self.x = int(size[0]) self.y = int(size[1]) if _map==None: self.surface = pygame.Surface((size)) # RGB surface else: self.surface = _map self.robotOutline = pygame.Surface((10,10), pygame.SRCALPHA) # define DEFAULT robot shape (10x10 square) self.robotOutline.fill((0,0,0)) def setValue(self, position, value): ''' Set value of a cell (0.0-1.0) ''' self.surface.set_at(position, asColour(value)) def getValue(self, position): ''' Set value of a cell (0.0-1.0) ''' return asValue(self.surface.get_at(position)) def reset(self, value = 0.5): ''' Reset whole map to flat (0.5). ''' self.surface.fill(asColour(value)) def getMap(self): ''' get pygame surface copy of map. ''' return self.surface.copy() def setMap(self, _map): ''' Set pygame surface map. ''' self._map = _map def getMapSize(self): ''' ''' return (self.x, self.y) def stamp(self, position, surface): # stamp a surface into map CENTERED at x,y ''' Stamp another surface (normally with a transparent layer) onto the map. Usually used to mark position occupied by robot as clear of obstructions. Will later be used to stamp arcs of sensor data? 
''' w, h = surface.get_size() self.surface.blit(surface, (position[0]-int(w/2), position[1]-int(h/2))) def setRobotShape(self, robotSurface): ''' Set shape used to represent robot when. Should have a key layer! ''' self.robotOutline = robotSurface def robotAt(self, position, theta): ''' Set robot position and mark robot shape as being free from obstruction. ''' robotRotated = pygame.transform.rotate(self.robotOutline, 360*theta/(2.0*math.pi)) self.stamp(position, robotRotated) def createTestWorld(self): ''' Create a test world. ''' self.surface.fill((asColour(1.0))) pygame.draw.rect(self.surface, BLACK, [5,5, self.x-10, self.y-10]) for i in range(0,5): pygame.draw.circle(self.surface, asColour(random.randint(0,60)/100.0+0.4), [random.randint(0,self.x), random.randint(0,self.y)], random.randint(0,self.x/4)) for i in range(0,5): pygame.draw.rect(self.surface, asColour(random.randint(0,60)/100.0+0.4), [random.randint(0,self.x), random.randint(0,self.y), random.randint(0,self.x/4), random.randint(0,self.x/4)]) pygame.draw.rect(self.surface, WHITE, [0, 0, 5, self.y]) pygame.draw.rect(self.surface, WHITE, [0, 0, self.x, 5]) pygame.draw.rect(self.surface, WHITE, [self.x-5, 0, 5, self.y]) pygame.draw.rect(self.surface, WHITE, [0, self.y-5, self.x, 5]) def view(self): # return surface showing map ''' Return surface showing map, robot position, target position. ''' result = self.surface.copy() return result def zoomMap(_map, centre, zoom, size): ''' pygame surface centred on centre, zoomed and clipped to size. ''' ox, oy = _map.get_size() nx, ny = size x, y = size cx, cy = centre if zoom<=(float(nx)/float(ox)): # if zoom will not fit new size.. zoom = float(nx)/float(ox) result = pygame.Surface([ox*2, oy*2]) # Surface big enough to overhang result.fill(RED) result.blit(_map, [ox/2, oy/2]) # copy map into middle of big surface result = result.subsurface([cx, cy, ox, oy]) # surface same size as original, centred. 
result = pygame.transform.scale(result, [int(ox*zoom), int(oy*zoom)]) # zoom result = result.subsurface([int((ox*zoom-x)/2), int((oy*zoom-y)/2), x, y]) return result mainLoop = True if __name__=="__main__": pygame.init() print 'pygame started' DISPLAY_SURF = pygame.display.set_mode((640, 480)) myMap = OccupancyMap((200, 200)) print 'map created' myMap.reset() print 'reset' print '[0, 0] returns ', myMap.getValue((0, 0)) myMap.setValue((0, 0), 0.99) print '[0, 0]=0.99' print '[0, 0] returns ', myMap.getValue((0, 0)) print print 'R - Reset' print 'C - Create world.' print 'Q - Quit' zoom = 2.0 while mainLoop: for event in pygame.event.get(): if event.type == pygame.QUIT: mainLoop = False if event.type == pygame.KEYDOWN: if event.key == pygame.K_r: myMap.reset() if event.key == pygame.K_c: myMap.createTestWorld() if event.key == pygame.K_q: mainLoop = False if event.key == pygame.K_z: zoom = zoom+0.2 if event.key == pygame.K_a: if zoom>1.0: zoom = zoom-0.2 DISPLAY_SURF.fill( (0,0,0) ) DISPLAY_SURF.blit(myMap.view(), [10,10] ) DISPLAY_SURF.blit(zoomMap(myMap.view(), [0,0], zoom, [350,350]), [250,10] ) DISPLAY_SURF.blit(zoomMap(myMap.view(), [199,199], zoom, [250,250]), [150,110] ) pygame.display.update()
997,662
8a34f011763da0f0774e7ca69792c2f151503642
from eve import Eve from flask import request from oauth2 import BearerAuth, oauth from flask.ext.bootstrap import Bootstrap from eve_docs import eve_docs app = Eve(auth=BearerAuth) oauth.init_app(app) Bootstrap(app) app.register_blueprint(eve_docs, url_prefix='/docs') @app.route('/oauth/authorize', methods=['GET', 'POST']) @oauth.authorize_handler def authorize(*args, **kwargs): if request.method == 'GET': client_id = kwargs.get('client_id') client = Client.query.filter_by(client_id=client_id).first() kwargs['client'] = client return render_template('oauthorize.html', **kwargs) confirm = request.form.get('confirm', 'no') return confirm == 'yes' @app.route('/oauth/token', methods=['GET', 'POST']) @oauth.token_handler def access_token(): return None app.run(port=app.config.get("LISTEN_PORT"),debug=True)
997,663
d60d2f536b1164dfcecad790b484cb247c044f2f
S = input() def f(): if S == "keyence": print("YES") else: n = len(S) for i in range(n): for j in range(i, n): T = "" for k in range(n): if i <= k <= j: continue else: T += S[k] if T == "keyence": print("YES") return else: print("NO") f()
997,664
f2e264c97437d38d1c0a2de5afe1ce2ecfeb787b
""" Formulário usado para apenas como facade para permitir o carregamento e configuração async dos opcionais. """ from django import forms class ProductForm(forms.Form): pass class ServiceForm(forms.Form): pass
997,665
77f5212a84cf02c24a5212ac2c7eb302a5e5d203
from board import Board import copy import random import time static_eval_count = 0 minimax_calls = 0 total_branches = 0 cutoffs = 0 def minimax(game_state, alpha, beta, depth_bound): global minimax_calls global total_branches global static_eval_count global cutoffs if depth_bound == 4: static_eval_count += 1 return (game_state.static_evaluation(), None) # it is irrelevant what we return int second slot elif game_state.current_player == 0: # i.e is AI turn (max node) bestmove = None minimax_calls += 1 for successor_game_state in game_state.generate_successors(): total_branches += 1 # player_move just gets discarded bv, player_move = minimax(successor_game_state, alpha, beta, depth_bound+1) if bv > alpha: alpha = bv bestmove = successor_game_state.last_move_made if alpha >= beta: cutoffs +=1 return (beta, bestmove) return (alpha, bestmove) else: # i.e looking at player turn (min node) bestmove = None minimax_calls += 1 for successor_game_state in game_state.generate_successors(): total_branches += 1 # computer_move is not relevant, we just need to return both for later bv, computer_move = minimax(successor_game_state, alpha, beta, depth_bound+1) if bv < beta: beta = bv bestmove = successor_game_state.last_move_made if beta <= alpha: cutoffs +=1 return (alpha, bestmove) return (beta, bestmove) class Game: def __init__(self, board_size, board, player=0, last_move_made = ((),())): self.board_size = board_size self.board = board self.last_move_made = last_move_made self.current_player = player self.player_symbol = ('x','o') self.endgame = 0 def get_legal_moves(self, current_player): """ Returns a list of of legal moves, as pairs of pairs e.g [((8,8),(5,8)),...] 
""" legal_moves = [] for row in range(self.board_size): for col in range(self.board_size): if self.board.repr[row][col] == self.player_symbol[current_player]: position = (row,col) move_fn_list = [self.north_move, self.east_move, self.south_move, self.west_move] for move_fn in move_fn_list: move = move_fn(position) if self.is_legal_move(current_player,move): legal_moves.append(move) # now we are going to check for a double jump! start = move[0] cur_end = move[1] new_board = copy.deepcopy(self.board) # Make a copy of the board, and then make the move on that board new_board.movePiece(start,cur_end) continue_move = move_fn(cur_end) # Try to move again in the same direction new_game_state = Game(self.board_size,new_board,current_player) # make a whole new game state and check if our move is legal on that while(new_game_state.is_legal_move(current_player, continue_move)): start_cur = cur_end cur_end = continue_move[1] legal_moves.append((start,cur_end)) new_board = copy.deepcopy(new_board) new_board.movePiece(start_cur,cur_end) continue_move = move_fn(cur_end) new_game_state = Game(new_game_state.board_size,new_board,current_player) return legal_moves def is_legal_move(self, current_player, move): """ Given a move e.g ((8,8),(5,8)), check if that is legal, return true if it is, false otherwise """ starting_pos = move[0] ending_pos = move[1] if ending_pos[0] not in range(self.board_size) or ending_pos[1] not in range(self.board_size): # Discard any generated moves that fall off of the board return False if self.board.repr[starting_pos[0]][starting_pos[1]]!=self.player_symbol[current_player]: print "this should never trigger and is redundant" return False if self.board.repr[ending_pos[0]][ending_pos[1]]!= '.': # Check that landing spot is empty return False middle_pos = (starting_pos[0]-(starting_pos[0]-ending_pos[0])/2,starting_pos[1]-(starting_pos[1]-ending_pos[1])/2) # Check the middle spot is the other piece - this should in theory not matter because the pieces 
alternate other_player = 1 - current_player if self.board.repr[middle_pos[0]][middle_pos[1]] != self.player_symbol[other_player]: return False return True def generate_successors(self): successors = [] for move in self.get_legal_moves(self.current_player): boardCopy = copy.deepcopy(self.board) boardCopy.movePiece(move[0], move[1]) successors.append(Game(self.board_size, boardCopy, 1-self.current_player, move)) for s in successors: if False: print s.board return successors def player_turn(self): try: legal_moves = self.get_legal_moves(self.current_player) print legal_moves if len(legal_moves) != 0: is_valid_input = False while is_valid_input == False: move_coordinates = (input("Please enter start coordinate: "), input("Please enter end coordinate: ")) # should be two tuples entered actual_move_coordinates = ((move_coordinates[0][0]-1, move_coordinates[0][1]-1), (move_coordinates[1][0]-1, move_coordinates[1][1]-1)) # to convert user input (which is 1 indexed) to 0 indexed (which our board representation is in) is_valid_input = actual_move_coordinates in legal_moves self.board.movePiece(actual_move_coordinates[0], actual_move_coordinates[1]) print(self.board) self.last_move_made = move_coordinates self.current_player = 1 - self.current_player else: self.endgame = 1 print "Player", self.player_symbol[self.current_player], "loses!" 
except KeyboardInterrupt: raise except: print "You messed up, you dingus" self.player_turn() def computer_turn(self): global minimax_calls if len(self.get_legal_moves(self.current_player)) != 0: computer_move = minimax(self, float("-inf"), float("inf"), 0) computer_move = computer_move[1] print "FROM BOARD:" print self.board if computer_move is not None: self.board.movePiece(computer_move[0], computer_move[1]) print(self.board) print "Made move: ", ((computer_move[0][0]+1, computer_move[0][1]+1), (computer_move[1][0]+1, computer_move[1][1]+1)) self.last_move_made = computer_move self.current_player = 1 - self.current_player else: random_move = random.choice(self.get_legal_moves(self.current_player)) self.board.movePiece(random_move[0], random_move[1]) print(self.board) print "Made move: ", ((random_move[0][0]+1, random_move[0][1]+1), (random_move[1][0]+1, random_move[1][1]+1)) # to present the computer's move nicely to player self.last_move_made = computer_move self.current_player = 1 - self.current_player else: self.endgame = 1 print "Player", self.player_symbol[self.current_player], "loses!" 
@staticmethod def north_move(pos): return (pos,(pos[0]-2,pos[1])) @staticmethod def east_move(pos): return (pos,(pos[0],pos[1]+2)) @staticmethod def south_move(pos): return (pos,(pos[0]+2,pos[1])) @staticmethod def west_move(pos): return (pos,(pos[0],pos[1]-2)) def static_evaluation(self): my_moves = self.get_legal_moves(0) opponent_moves = self.get_legal_moves(1) if opponent_moves == 0: return float("inf") if my_moves == 0: return float("-inf") return len(my_moves) - len(opponent_moves) def play_game(game_state): print game_state.board to_remove = input("x remove a piece: ") game_state.board.removePiece((to_remove[0]-1,to_remove[1]-1)) print game_state.board to_remove = input("o remove a piece: ") game_state.board.removePiece((to_remove[0]-1,to_remove[1]-1)) while game_state.endgame != 1: if game_state.current_player == 0: game_state.computer_turn() else: game_state.computer_turn() def test_game(game_state): game_state.board.removePiece((3,3)) print game_state.board game_state.board.removePiece((3,2)) print game_state.board while game_state.endgame != 1: if game_state.current_player == 0: game_state.computer_turn() else: game_state.computer_turn() if __name__ == '__main__': start = time.time() test_game(Game(8,Board(8))) print "GAME TOOK", time.time() - start, "SECONDS" print "NUM STATIC EVALS:", static_eval_count print "AVG BRANCHING FACTOR:", total_branches/(minimax_calls+0.0) print "NUM CUTOFFS", cutoffs
997,666
283455d301b21992204a7119c59c91f1bb13294a
#!@Author : Sanwat #!@File : .py import numpy as np a= np.array([1,2,3,4]) b= a print (b) a[2]=0 print ("修改a后,副本b为:\n",b) c= a[0:2] print (c) a[0]=0 print (c) A=np.array([1,2,3,4]) d= A.copy() print (d) A[0]=0 print (d)#副本保留不变
997,667
40c1ab3c3b8ff2715910504b2d8997b9940f363c
from setuptools import setup, find_packages from os import path here = path.abspath(path.dirname(__file__)) # Get the long description from the README file with open(path.join(here, 'README.md'), encoding='utf-8') as f: long_description = f.read() # Arguments marked as "Required" below must be included for upload to PyPI. # Fields marked as "Optional" may be commented out. setup( name='trio-paho-mqtt', # Required version='0.3.0', # Required description='trio async MQTT Client', long_description=long_description, long_description_content_type='text/markdown', url='https://github.com/bkanuka/trio-paho-mqtt', download_url='https://github.com/bkanuka/trio-paho-mqtt/archive/v0.3.0.tar.gz', author_email='bkanuka@gmail.com', classifiers=[ 'Development Status :: 4 - Beta', 'Framework :: Trio', 'Topic :: Software Development :: Libraries :: Python Modules', 'Intended Audience :: Developers', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', ], keywords='trio mqtt', package_dir={'': 'trio_paho_mqtt'}, # Optional packages=find_packages(where='trio_paho_mqtt'), python_requires='>=3.5, <4', install_requires=['paho-mqtt', 'trio'], project_urls={ 'Source': 'https://github.com/bkanuka/trio-paho-mqtt', }, )
997,668
b105e4a167eb02e76b0a3750384bd64a702a4561
from django.utils.translation import ugettext_lazy as _ from models import Link from cms.settings import CMS_MEDIA_URL from cms.plugin_pool import plugin_pool from cms.plugin_base import CMSPluginBase from cms.plugins.link.forms import LinkForm class LinkPlugin(CMSPluginBase): model = Link form = LinkForm name = _("Link") render_template = "cms/plugins/link.html" text_enabled = True def render(self, context, instance, placeholder): if instance.url: link = instance.url elif instance.page_link: link = instance.page_link.get_absolute_url() else: link = "" context.update({ 'name':instance.name, 'link':link, 'placeholder':placeholder, 'object':instance }) return context def icon_src(self, instance): return CMS_MEDIA_URL + u"images/plugins/link.png" plugin_pool.register_plugin(LinkPlugin)
997,669
b40c468c8e36509e88895f90cc71a2098ca62a83
import types
import time
import ujson

from stepist.flow import utils, session

from .next_step import call_next_step


class StepData(object):
    """Container for the payload passed between steps.

    Attributes:
        flow_data: the business data produced by the previous step.
        meta_data: flow-level bookkeeping carried alongside the data.
    """
    flow_data = None
    meta_data = None

    def __init__(self, flow_data, meta_data=None):
        self.flow_data = flow_data
        self.meta_data = meta_data

    def get_dict(self):
        # Plain-dict view suitable for queue transport.
        return {
            'flow_data': self.flow_data,
            'meta_data': self.meta_data
        }

    def __json__(self):
        # ujson-compatible hook: serialize the dict form.
        return ujson.dumps(self.get_dict())


class Step(object):
    """
    Step object.

    Wraps one handler in a flow chain: executes it, records the result in
    the flow session, and forwards the result to `next_step` (either inline
    or via the app's worker engine).
    """
    # handler function which handle data
    handler = None

    # next step object which getting current handler result
    next_step = None

    # True, if we need to run current handler in distribute way (using queues)
    as_worker = None

    # True, if we need to wait result from current handler
    # (used in previous step)
    wait_result = None

    # Factor object for iterator handling
    factory = None

    def __init__(self, app, handler, next_step, as_worker, wait_result,
                 unique_id=None):
        self.app = app
        self.handler = handler
        self.next_step = next_step
        self.as_worker = as_worker
        self.wait_result = wait_result
        self.unique_id = unique_id

        self.factory = None

    @property
    def __name__(self):
        # Prefer the explicit unique_id; fall back to the handler's name.
        return self.unique_id or self.handler.__name__

    def __call__(self, **kwargs):
        """Execute this step on `kwargs` and forward the result downstream.

        Returns the chain's result, or None when the handler raised
        StopFlowFlag to abort the flow.
        """
        try:
            result_data = self.execute_step(**kwargs)
        except utils.StopFlowFlag:
            return None

        if self.is_last_step():
            return result_data

        # if isinstance(result_data, types.GeneratorType):
        #     for row_data in result_data:
        #         call_next_step(row_data, next_step=self.next_step)
        #     return None

        return call_next_step(result_data, next_step=self.next_step)

    def execute_step(self, **data):
        """
        Run the handler on the subset of `data` it accepts.

        :param data: next step data
        :param last_step: Step object or step_key value
        :return: Flow result
        """
        # if 'self_step' in data:
        #     raise RuntimeError("You can't use 'self_step' var in data")

        # Filter `data` down to the kwargs the handler's signature accepts.
        handler_data = utils.validate_handler_data(self.handler, data)
        result_data = self.handler(**handler_data)
        # Record the latest result in the flow session for later steps.
        session.set_flow_data(result_data)

        return result_data

    def add_job(self, data, **kwargs):
        # Enqueue this step for asynchronous execution by a worker,
        # bundling the current session meta data with the payload.
        step_data = StepData(flow_data=data,
                             meta_data=session.get_meta_data())
        return self.app.worker_engine.add_job(step=self,
                                              data=step_data,
                                              **kwargs)

    def receive_job(self, **data):
        # Worker entry point: validate the queued payload, restore the flow
        # context, then execute this step on the restored flow data.
        if "flow_data" not in data:
            raise RuntimeError("flow_data not found in job payload")

        with session.change_flow_ctx(data.get('meta_data', {}),
                                     data['flow_data']):
            return self(**session.get_flow_data())

    def set_factory(self, factory):
        # Attach a factory used for iterator/fan-out handling.
        self.factory = factory

    def is_last_step(self):
        # A step with no successor terminates the chain.
        if self.next_step is None:
            return True

        return False

    def step_key(self):
        # Unique key identifying this step in the flow registry.
        if isinstance(self.handler, types.FunctionType):
            key = self.unique_id or self.handler.__name__
        else:
            # NOTE(review): this *calls* handler.__name__() for non-function
            # handlers -- it assumes such handler objects implement __name__
            # as a method. Confirm against the handler types actually used.
            key = self.unique_id or self.handler.__name__()

        return "%s" % key
997,670
1cae88040ff3a88cdf4b0da91d5626e46726f0a4
# Read two integers a and b from a single stdin line.
a, b = map(int, input().split())

# A single step suffices when b does not exceed a; otherwise take
# floor(b / a) full steps plus one final step.
print(1 if b <= a else b // a + 1)
997,671
2c3c35956dc9cb639c206800ccc1111636926047
# THIS FILE IS GENERATED FROM KIVY SETUP.PY __version__ = '1.11.0' __hash__ = '2c77434d845a97764a9255bc9b80b1803239f3dd' __date__ = '20190615'
997,672
24cac047c09ca72f83d6ce017a01dc8bb0da3a83
# -*- encoding: utf-8 -*-
# Copyright 2008 Agile42 GmbH, Berlin (Germany)
# Copyright 2007 Andrea Tomasini <andrea.tomasini_at_agile42.com>
# Copyright 2011 Agilo Software GmbH All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author:
#     - Felix Schwarz <felix.schwarz__at__agile42.com>
#     - Martin Häcker <martin.haecker__at__agile42.com>

import agilo.utils.filterwarnings

from agilo.test import AgiloTestCase
from agilo.test.functional.agilo_tester import PageObject


class PageObjectTest(AgiloTestCase):
    """Tests PageObject's parsing of Trac system-message notices/warnings."""

    # HTML fixture containing a single "warning" system message.
    warning = """<body>
    <div class="main" style="left: 249px;"><!-- Main Content -->
    <div class="nav" id="ctxtnav">
    </div>
    <div class="system-message warning">
    <strong>Warning:</strong>
    fnord warning
    </div>
    <div class="admin" id="content">
    <h1>Administration</h1>
    </div>
    </div>
    </body>
    """

    # HTML fixture containing two separate "notice" system messages.
    notice = """<body>
    <div class="main" style="left: 249px;"><!-- Main Content -->
    <div class="nav" id="ctxtnav">
    </div>
    <div class="system-message notice">
    <strong>Notice:</strong>
    fnord notice one
    </div>
    <div class="system-message notice">
    <strong>Notice:</strong>
    fnord notice two
    </div>
    <div class="admin" id="content">
    <h1>Administration</h1>
    </div>
    </div>
    </body>
    """

    def test_can_find_notice_on_page(self):
        # Both notices on the page must be individually discoverable.
        page = PageObject()
        page.set_html(self.notice)
        self.assert_true(page.has_notice('fnord notice one'))
        self.assert_true(page.has_notice('fnord notice two'))

    def test_can_find_warnings_on_page(self):
        page = PageObject()
        page.set_html(self.warning)
        self.assert_true(page.has_warning('fnord warning'))

    def test_can_remove_html_formatting(self):
        # Stripping tags and collapsing whitespace leaves only the text.
        page = PageObject()
        warning_without_html = page.remove_html_and_whitespace(self.warning)
        self.assert_equals("Warning: fnord warning Administration",
                           warning_without_html)

    # TODO: has_no_notice_or_warning, etc.
997,673
627a498f9078a41e5700b6594ded67393a847e46
import sys
from datetime import datetime

import numpy as np
import pandas as pd

# STATIC VARIABLES
# common month between time ranges used for stitching
STITCHING_MONTHS = (1, 7)  # january and july
# common word between keyword sets used for scaling
CROSS_SET_KEYWORD = 'ozone'


def scale_monthly_data(unscaled_monthly_data, cross_set_join_word=CROSS_SET_KEYWORD):
    """Rescale one month of data so the keyword *sets* share a common scale.

    The first column whose name contains `cross_set_join_word` defines the
    reference mean; every later join-word column updates the scale factor that
    is then applied to it and all following columns. The 'date' column passes
    through unchanged.

    Returns a new DataFrame; `unscaled_monthly_data` is not modified.
    """
    first_cross_set_keyword_occurrence = True
    first_cross_set_keyword_mean = None
    rescaled_monthly_table = None
    keyword_scale = 1.0
    for keyword in unscaled_monthly_data.columns:
        if keyword == 'date':
            rescaled_column = unscaled_monthly_data['date']
        else:
            if cross_set_join_word in keyword:
                cross_set_monthly_mean = unscaled_monthly_data[keyword].mean(skipna=True)
                if first_cross_set_keyword_occurrence:
                    # First join-word column: this mean is the reference.
                    first_cross_set_keyword_mean = cross_set_monthly_mean
                    first_cross_set_keyword_occurrence = False
                else:
                    keyword_scale = cross_set_monthly_mean / first_cross_set_keyword_mean
            rescaled_column = unscaled_monthly_data[keyword].apply(lambda x: x * keyword_scale)
        # pd.concat silently drops a leading None, so the first column simply
        # starts the table.
        rescaled_monthly_table = pd.concat([rescaled_monthly_table, rescaled_column],
                                           sort=True, axis=1)
    return rescaled_monthly_table


def combine_and_scale_keyword_sets(data, months_to_scale, cross_set_join_word=CROSS_SET_KEYWORD):
    """Scale keyword sets month by month via the shared join word.

    Months that appear twice in `data` (overlap months used later for
    stitching) are rescaled twice, once per occurrence.
    """
    list_of_rescaled_months = list()
    for last_day_in_month in months_to_scale:
        first_day_in_month = datetime(last_day_in_month.year,
                                      last_day_in_month.month, 1).strftime("%Y-%m-%d")
        last_day_in_month = last_day_in_month.strftime("%Y-%m-%d")
        print("Joining keyword sets: " + last_day_in_month)

        duplicate_first_day_in_month_list = np.where(data["date"] == first_day_in_month)[0]
        duplicate_last_day_in_month_list = np.where(data["date"] == last_day_in_month)[0]

        monthly_data = data.iloc[duplicate_first_day_in_month_list[0]:
                                 duplicate_last_day_in_month_list[0] + 1]
        scaled_monthly_data = scale_monthly_data(monthly_data, cross_set_join_word)
        list_of_rescaled_months.append(scaled_monthly_data)

        # rescales the duplicate months which are needed later for stitching
        if len(duplicate_first_day_in_month_list) > 1 and len(duplicate_last_day_in_month_list) > 1:
            monthly_data = data.iloc[duplicate_first_day_in_month_list[1]:
                                     duplicate_last_day_in_month_list[1] + 1]
            scaled_monthly_data = scale_monthly_data(monthly_data, cross_set_join_word)
            list_of_rescaled_months.append(scaled_monthly_data)

    scaled_keyword_sets = pd.concat(list_of_rescaled_months, sort=True)
    return scaled_keyword_sets


def stitch_keywords(data, months_to_stitch, overlapping_months=STITCHING_MONTHS):
    """Stitch consecutive time ranges together at the overlap months.

    Each new range is scaled by the ratio of the overlap month's means
    (future / past) so that the ranges line up.
    """
    is_first_run = True
    first_slice_index = None
    list_of_stitched_time_ranges = list()
    scale = pd.DataFrame(index=data.columns)
    scale['scale'] = 1
    for last_day_in_month in months_to_stitch:
        if last_day_in_month.month in overlapping_months:
            first_day_in_month = datetime(last_day_in_month.year,
                                          last_day_in_month.month, 1).strftime("%Y-%m-%d")
            last_day_in_month = last_day_in_month.strftime("%Y-%m-%d")
            print("Stitch dates: " + str(last_day_in_month))

            # start/end indices of the duplicate months (months used to stitch)
            duplicate_first_day_in_month_list = np.where(data["date"] == first_day_in_month)[0]
            duplicate_last_day_in_month_list = np.where(data["date"] == last_day_in_month)[0]

            if is_first_run and first_slice_index is None:
                first_slice_index = duplicate_first_day_in_month_list[0]
                continue
            else:
                # slice out the current time range up to the overlap month
                time_range = data.iloc[first_slice_index:
                                       duplicate_last_day_in_month_list[0] + 1]
                time_range = time_range.set_index('date')

                # scales each column (scale is 1 on the first run)
                if not is_first_run:
                    # FIX: iterate the frame's own columns instead of relying
                    # on a module-level `cols` global (identical set, since
                    # the caller passes scaled_trends[cols]).
                    for kw in data.columns:
                        if kw == 'date':
                            pass
                        else:
                            time_range[kw] = time_range[kw].apply(lambda x: x * scale.loc[kw])
                list_of_stitched_time_ranges.append(time_range)

                # no second occurrence -> nothing left to stitch
                if len(duplicate_first_day_in_month_list) == 1 or len(duplicate_last_day_in_month_list) == 1:
                    break

                # FIX: pd.np was deprecated/removed -- use numpy directly.
                if is_first_run:
                    is_first_run = False
                    past_avg = data.iloc[duplicate_first_day_in_month_list[0]:
                                         duplicate_last_day_in_month_list[0] + 1].replace(0, np.nan).mean(axis=0)
                else:
                    past_avg = time_range.loc[first_day_in_month:
                                              last_day_in_month].replace(0, np.nan).mean(axis=0)
                future_avg = data.iloc[duplicate_first_day_in_month_list[1]:
                                       duplicate_last_day_in_month_list[1] + 1].replace(0, np.nan).mean(axis=0)
                past_avg = past_avg.fillna(1)
                future_avg = future_avg.fillna(1)
                scale = future_avg / past_avg
                first_slice_index = duplicate_last_day_in_month_list[1] + 1

    return pd.concat(list_of_stitched_time_ranges, sort=True)


def main():
    """CLI entry point: read the CSV named in argv, scale, stitch, save.

    Wrapped in a main() guard (was module-level) so importing this module
    no longer reads sys.argv or calls exit().
    """
    if len(sys.argv) > 1:
        if ".csv" not in sys.argv[1]:
            input_filename = sys.argv[1] + ".csv"
        else:
            input_filename = sys.argv[1]
    else:
        print("Please pass in a file name and a (optional: ozone is default) common keyword set join word.")
        exit()

    trends = pd.read_csv(input_filename)

    # drops columns named partial
    cols = [c for c in trends.columns if c[:9] != 'isPartial']
    trends = trends[cols]

    # assumes the list of dates is the first column
    first_date_in_table = trends.iloc[0, 0]
    last_date_in_table = trends.iloc[-1, 0]
    months_in_table = pd.date_range(start=first_date_in_table,
                                    end=last_date_in_table, freq='M')

    if len(sys.argv) == 3:
        scaled_trends = combine_and_scale_keyword_sets(trends, months_in_table, sys.argv[2])
    else:
        scaled_trends = combine_and_scale_keyword_sets(trends, months_in_table)
    scaled_trends = scaled_trends[cols]

    stitched_and_scaled_trends = stitch_keywords(scaled_trends, months_in_table, STITCHING_MONTHS)
    stitched_and_scaled_trends.to_csv(input_filename.replace('.csv', '_scaled.csv'))


if __name__ == '__main__':
    main()
997,674
71699af219e0a6831df93a7456e69f247a6febf2
from starlette.testclient import TestClient
from starlette_auth.tables import User

from app.main import app


def get_user():
    """Create and persist a throwaway user; return (user, its update URL)."""
    user = User(email="ted@example.com", first_name="Ted", last_name="Bundy")
    user.save()
    url = app.url_path_for("user_update", user_id=user.id)
    return user, url


def test_response():
    # The update page renders successfully for an existing user.
    _, url = get_user()
    with TestClient(app) as client:
        response = client.get(url)
        assert response.status_code == 200
        assert response.template.name == "update.html"
        assert response.url == f"http://testserver{url}"


def test_has_correct_context():
    # The request object must be available in the template context.
    _, url = get_user()
    with TestClient(app) as client:
        response = client.get(url)
        assert "request" in response.context


def test_template():
    _, url = get_user()
    with TestClient(app) as client:
        response = client.get(url)
        assert response.template.name == "update.html"


def test_post_update():
    # Submitting the form with unchanged values must succeed.
    user, url = get_user()
    with TestClient(app) as client:
        response = client.post(
            url,
            data={
                "first_name": user.first_name,
                "last_name": user.last_name,
                "email": user.email,
                "is_active": user.is_active,
            },
        )
        # after saving, the page should be redirected to the users screen
        assert response.status_code == 302
        assert response.next.url == f"http://testserver/users"
997,675
52662ae9745928ced550606374f903f5d0ee40dd
#!/usr/bin/env python
"""Stream a tab-separated variant file from stdin to stdout.

When the operation argument is "INS", rewrite column 3 of every non-header
line as column 2 + 1; all other lines pass through unchanged.
"""
import sys

op = sys.argv[1]

for line in sys.stdin:
    if not line.startswith("#") and op == "INS":
        fields = line.split()
        fields[2] = str(int(fields[1]) + 1)
        line = "\t".join(fields) + "\n"
    sys.stdout.write(line)
997,676
6ca18aaa265a4e3ca11aec6b56544f64492d91a4
import logging

# Record layout: timestamp | logger name | level | message.
log_format = "%(asctime)s | %(name)s | %(levelname)s | %(message)s"
log_activity = "activity_log"

# Root configuration: everything at DEBUG and above goes to the sample file.
logging.basicConfig(filename="sample_container1.log", level=logging.DEBUG,
                    format=log_format)

# log data
logger = logging.getLogger(log_activity)
logger.setLevel(logging.DEBUG)

# "application" code
logger.debug("debug message")
# FIX: Logger.warn() has been deprecated since Python 3.3 -- use warning().
logger.warning("warn message")
logger.error("error message")
logger.critical("critical message")

if __name__ == '__main__':
    pass
997,677
0b988b7483a8bdd6a087ea30ccbdfbf91a1a11d4
""" Purpose: A Flask server that serves the Summarizer service, integrated to the BigBlueButton client Source Code Creator: Soniya Vijayakumar Project: WILPS Hamburg University Term: Summer 2021 M.Sc. Intelligent Adaptive Systems """ from __future__ import unicode_literals from flask import Flask, render_template, url_for, request, jsonify, make_response, Response, send_file, session from utils.getMeetingInfo import getcurrentMeetingInfo import os import os.path from os import path app = Flask(__name__) CONF_ID = "" MEETING_START_TIME = "" PARTICIPANT_LIST = "" FINAL_SUMMARY = "" SUM_LEN = "" app.secret_key = 'nevertellthistoanyone' SUMMARY = "52699_summary.txt" DEFAULT_MSG = "Meeting yet to be summarized. Click Refresh after few minutes..." def loadMeetingInformation(): pStr = "" #Step 1: Get the Current Meeting Information currentMeetingInfo = getcurrentMeetingInfo() CONF_ID = currentMeetingInfo['voiceConfID'] session['CONF_ID'] = currentMeetingInfo['voiceConfID'] MEETING_START_TIME = currentMeetingInfo['recordTimeStamp'] session['MEETING_START_TIME'] = currentMeetingInfo['recordTimeStamp'] participantList = currentMeetingInfo['userNames'] for p in participantList: pStr = pStr + str(p) + ", " PARTICIPANT_LIST = pStr session['PARTICIPANT_LIST'] = PARTICIPANT_LIST summaryFileName = os.getcwd() + "/MeetingSummaryData/" + CONF_ID + "_summary.txt" #summaryFileName = os.getcwd() + "/MeetingSummaryData/" + SUMMARY if(path.exists(summaryFileName)): fSummary = open(summaryFileName, "r") final_summary = fSummary.read() FINAL_SUMMARY = final_summary else: FINAL_SUMMARY = DEFAULT_MSG session['SUM_LEN'] = 100 SUM_LEN = str(session.get('SUM_LEN')) return CONF_ID, MEETING_START_TIME, PARTICIPANT_LIST, FINAL_SUMMARY, SUM_LEN @app.route('/', methods=['GET','POST']) def index(): CONF_ID, MEETING_START_TIME, PARTICIPANT_LIST, FINAL_SUMMARY, SUM_LEN = loadMeetingInformation() return 
render_template('index.html',conf_id=CONF_ID,meeting_start_time=MEETING_START_TIME,participant_list=PARTICIPANT_LIST, sum_len=SUM_LEN, final_summary=FINAL_SUMMARY) @app.route("/newroute") def newroute(): CONF_ID = str(session.get('CONF_ID')) summaryFileName = os.getcwd() + "/MeetingSummaryData/" + str(CONF_ID) + "_summary.txt" #Step 2: Get the meeting summary so far fSummary = open(summaryFileName, "r") final_summary = fSummary.read() FINAL_SUMMARY = final_summary MEETING_START_TIME = str(session.get('MEETING_START_TIME')) PARTICIPANT_LIST = str(session.get('PARTICIPANT_LIST')) SUM_LEN = str(session.get('SUM_LEN')) return render_template('index.html',conf_id=CONF_ID,meeting_start_time=MEETING_START_TIME,participant_list=PARTICIPANT_LIST, sum_len=SUM_LEN, final_summary=FINAL_SUMMARY) @app.route("/getPDF") def getPDF(): conf_id = session.get('CONF_ID') summaryFileName = os.getcwd() + "/MeetingSummaryData/PDF/" + str(conf_id) + "_summary.pdf" fName = str(conf_id) + "_summary.pdf" try: return send_file(summaryFileName, attachment_filename=fName) except Exception as e: return str(e) if __name__ == '__main__': app.run(host='localhost', port=7030, debug=True)
997,678
bd6657cd9b05e0712aa2247265c7caf533482b66
# coding: utf-8
"""Helpers for pickle serialization with optional bz2 compression."""
import pickle
import bz2


def compress(data, compressed, compress_level):
    """bz2-compress `data` at `compress_level` when `compressed` is true."""
    if not compressed:
        return data
    return bz2.compress(data, compress_level)


def decompress(data, compressed):
    """Inverse of compress(): bz2-decompress when `compressed` is true."""
    if not compressed:
        return data
    return bz2.decompress(data)


def serialize(data):
    """Pickle `data` to a bytes payload."""
    return pickle.dumps(data)


def deserialize(data):
    """Restore an object previously produced by serialize()."""
    return pickle.loads(data)
997,679
18fa6677f3e607cb74d2681f7ba5b80dec931f55
"""Demonstrates Python's dynamic typing: a name may be rebound to values of
different types at any time.

Built-in literal examples:
    x = "Hello World"                       str
    x = 20                                  int
    x = 20.5                                float
    x = 1j                                  complex
    x = ["apple", "banana", "cherry"]       list
    x = ("apple", "banana", "cherry")       tuple
    x = range(6)                            range
    x = {"name" : "John", "age" : 36}       dict
    x = {"apple", "banana", "cherry"}       set
    x = frozenset({"apple", "banana", "cherry"}) frozenset
    x = True                                bool
    x = b"Hello"                            bytes
    x = bytearray(5)                        bytearray
    x = memoryview(bytes(5))                memoryview
"""

my_integer = 3
my_float = 3.141592
my_string = "hello"

# type() reports the class of the object a name is currently bound to.
for value in (my_integer, my_float, my_string):
    print(type(value))

# Rebinding: the *name* has no fixed type -- only the objects do.
my_integer = 'some text'
print(type(my_integer))
997,680
903783fac5ac01b8b18db7c33b984e3d091a7ceb
import matplotlib.pyplot as plt
from termcolor import colored
import matplotlib.transforms
import numpy as np
import pandas as pd


def plot_graphics(graph, MAX_ITR, name_save=""):
    """Plot best/average/median fitness per generation for an ant-colony run
    and save the figure under graficos/.

    `graph.log_itr` is assumed to hold one (best, avg, median) triple per
    iteration -- TODO confirm against the Graph class.
    """
    # Transpose the per-iteration log into three per-metric series.
    log_itr = list(map(list, zip(*graph.log_itr)))
    best_ger, avg_ger, median_ger = log_itr[0], log_itr[1], log_itr[2]

    fig, (ax1, ax2, ax3) = plt.subplots(1, 3)
    fig.set_size_inches(30, 9)
    title = fig.suptitle('Fitness - Colonia de Formigas', fontsize=40, x=0.45, y=0.97)
    plt.rcParams.update({'font.size': 20})
    plt.subplots_adjust(left=0.04, right=0.85, top=0.85)
    # Summary text box on the right margin (parameters + fitness stats).
    plt.gcf().text(0.86, 0.25,
                   (graph.get_parameters() +
                    '\n\n-----------------------------------------\n\n Melhor Fitness: ' +
                    str(graph.best_solution.fitness) +
                    '\n\n Media Fitness: %.2f' % graph.avg_fitness +
                    '\n\n Mediana Fitness: %.2f' % graph.median_fitness),
                   fontsize=16)

    # Downsample to at most ~100 points on the x axis.
    if MAX_ITR >= 100:
        step = int(MAX_ITR / 100)
    else:
        step = 1

    avg_ger_step, median_ger_step, best_ger_step = [], [], []
    for i in range(0, MAX_ITR, step):
        avg_ger_step.append(avg_ger[i])
        median_ger_step.append(median_ger[i])
        best_ger_step.append(best_ger[i])

    # Panel 1: best fitness over the generations.
    ax1.set_title("Melhores fitness")
    ax1.set_xlabel("Gerações", fontsize='medium')
    ax1.set_ylabel("Fitness", fontsize='medium')
    ax1.plot(list(range(0, MAX_ITR, step)), best_ger_step, 'g--',
             label='Melhor Fitness: ' + str(graph.best_solution.fitness))
    ax1.legend(ncol=3)
    ax1.tick_params(labelsize=18)

    # Panel 2: average and median fitness.
    ax2.set_title("Media e Mediana da fitness")
    ax2.set_xlabel("Gerações", fontsize='medium')
    ax2.set_ylabel("Fitness", fontsize='medium')
    ax2.plot(list(range(0, MAX_ITR, step)), avg_ger_step, 'r--',
             label='Media Fitness: %.4f' % graph.avg_fitness)
    ax2.plot(list(range(0, MAX_ITR, step)), median_ger_step, 'b--',
             label='Mediana Fitness: %.4f' % graph.median_fitness)
    ax2.legend(ncol=1)
    ax2.tick_params(labelsize=18)

    # Panel 3: all three curves overlaid for comparison.
    ax3.set_title("Comparação entre as fitness")
    ax3.set_xlabel("Gerações", fontsize='medium')
    ax3.set_ylabel("Fitness", fontsize='medium')
    ax3.plot(list(range(0, MAX_ITR, step)), best_ger_step, 'g--',
             label='Melhor Fitness: %.4f' % graph.best_solution.fitness)
    ax3.plot(list(range(0, MAX_ITR, step)), avg_ger_step, 'r--',
             label='Media Fitness: %.4f' % graph.avg_fitness)
    ax3.plot(list(range(0, MAX_ITR, step)), median_ger_step, 'b--',
             label='Mediana Fitness: %.4f' % graph.median_fitness)
    ax3.legend(ncol=1)
    ax3.tick_params(labelsize=18)

    print(colored("\033[1m" + "\n-> Graphic saved in: " +
                  'graficos/' + name_save + graph.get_parameters() +
                  '_fitness=' + str(graph.best_solution.fitness) + '.png', "green"))
    fig.savefig('graficos/' + name_save + graph.get_parameters() +
                '_fitness=' + str(graph.best_solution.fitness) + '.png')


def plot_table(results, results_ord, file):
    """Render a summary table of GA runs (ordered by `results_ord`) as a PNG
    under tabelas/.

    `results_ord` is assumed to be an iterable of tuples whose first element
    indexes into `results` -- TODO confirm against the caller.
    """
    table = {"NPOP": [], "NGER": [], "TX_M": [], "TX_C": [], "Elitism": [],
             "Fitness": [], "Avg Fit": [], "Median Fit": [], "STD": []}
    for i in results_ord:
        table["NPOP"].append(results[i[0]].nindiv)
        table["NGER"].append(results[i[0]].nger)
        table["TX_M"].append(results[i[0]].mutation_rate)
        table["TX_C"].append(results[i[0]].crossing_rate)
        table["Elitism"].append(results[i[0]].elitism)
        table["Fitness"].append(results[i[0]].best_solution.fitness)
        table["Avg Fit"].append("%.2f" % results[i[0]].avg_fitness)
        table["Median Fit"].append("%.2f" % results[i[0]].median_fitness)
        table["STD"].append("%.2f" % results[i[0]].std_fitness)

    df = pd.DataFrame(data=table)
    print("\nTable results: \n", df)

    # Figure with only the table -- no axes or grid.
    fig, ax = plt.subplots()
    fig.patch.set_visible(False)
    plt.axis('off')
    plt.grid('off')
    fig.set_size_inches(12, 11)
    the_table = ax.table(cellText=df.values, colLabels=df.columns,
                         cellLoc='center', loc='center')
    the_table.auto_set_font_size(False)
    the_table.set_fontsize(12)

    # Compute a bounding box with some padding around the rendered table.
    # NOTE(review): _cachedRenderer is a private matplotlib attribute removed
    # in newer releases -- confirm the pinned matplotlib version.
    plt.gcf().canvas.draw()
    points = the_table.get_window_extent(plt.gcf()._cachedRenderer).get_points()
    points[0, :] -= 120
    points[1, :] += 120
    nbbox = matplotlib.transforms.Bbox.from_extents(points / plt.gcf().dpi)

    fig.tight_layout()
    print(colored("\033[1m" + "\n => Table saved in: " +
                  'tabelas/' + file[len(file) - 1] + '.png', "green"))
    fig.savefig('tabelas/' + file[len(file) - 1] + '.png', dpi=500, bbox_inches=nbbox)
997,681
a95b7bf7b2c3b90073be418b8a726f09eac0b7b4
from sys import stdin
from itertools import accumulate


def main():
    """Split the array into a non-empty prefix and suffix; print the minimum
    absolute difference between their sums."""
    readline = stdin.readline
    n = int(readline())
    values = list(map(int, readline().split()))

    prefix_sums = list(accumulate(values))
    suffix_sums = list(accumulate(reversed(values)))

    best = float("inf")
    for cut in range(n - 1):
        # prefix_sums[cut] covers values[:cut+1];
        # suffix_sums[n - cut - 2] covers values[cut+1:].
        best = min(best, abs(prefix_sums[cut] - suffix_sums[n - cut - 2]))
    print(best)


if __name__ == "__main__":
    main()
997,682
40395d4ca5b6cf40557f634e8f3422b4b2573dd3
from kivy.base import runTouchApp
from kivy.lang import Builder

# Minimal Kivy demo: build a widget tree from an inline kv string and run
# the touch app loop. The root Label hosts two Buttons positioned at the
# top-left and bottom-right corners of the root widget.
runTouchApp(Builder.load_string('''
Label:
    Button:
        text:'Hello'
        pos:root.x,root.top - self.height
    Button:
        text:'World!'
        pos:root.right-200,root.y
'''))
997,683
148736d586a0e4cb3feefa5f18a6c7b3a7d19a6c
def reverse(text):
    """Return `text` with its characters in reverse order."""
    return "".join(reversed(text))
997,684
e6d1f32ef39100eaa119842196cb660e7ec7fb85
# -*- encoding follows the original header -*-
# -*- coding: utf-8 -*-
# @Author: aaronlai
# @Date:   2016-10-07 15:03:47
# @Last Modified by:   AaronLai
# @Last Modified time: 2016-10-07 17:30:26
"""Minimal Gomoku (five-in-a-row) environment for RL experiments."""

import numpy as np


def initGame(width=19):
    """Initialize width x width new game.

    Returns:
        state: (width, width, 2) array; channel k holds player k's stones.
        available: (width, width) array; 0 = empty, -inf = occupied.
    """
    state = np.zeros((width, width, 2))
    available = np.zeros((width, width))
    return state, available


def makeMove(state, available, action, actor):
    """Place a stone for `actor` at `action` (row, col).

    Returns (state, available_copy) on success, (None, available_copy) when
    the cell is already occupied. NOTE: `state` is mutated in place;
    `available` is copied.
    """
    available_ret = available.copy()
    if available_ret[action] == 0:
        state[action][actor] = 1
        available_ret[action] = float("-inf")
        return state, available_ret
    else:
        return None, available_ret


def winGame(sub_state):
    """Return True if `sub_state` (one player's plane) has five in a row.

    FIX: the original scanned only a (rows-4) x (cols-4) window for every
    direction, so it missed horizontal wins in the last 4 rows, vertical
    wins in the last 4 columns, and ALL anti-diagonal ("/") wins. Each
    direction now gets its own bounds check.
    """
    rows, cols = sub_state.shape
    for i in range(rows):
        for j in range(cols):
            # horizontal "-"
            if j + 5 <= cols and (sub_state[i, j:j + 5] == 1).all():
                return True
            # vertical "|"
            if i + 5 <= rows and (sub_state[i:i + 5, j] == 1).all():
                return True
            # main diagonal "\"
            if i + 5 <= rows and j + 5 <= cols and \
                    all(sub_state[i + k, j + k] == 1 for k in range(5)):
                return True
            # anti-diagonal "/"
            if i + 5 <= rows and j - 4 >= 0 and \
                    all(sub_state[i + k, j - k] == 1 for k in range(5)):
                return True
    return False


def fullGrid(state):
    """Return True when the chessboard has no empty cell left."""
    return not ((state[:, :, 0] + state[:, :, 1]) == 0).any()


def getReward(state, whose_turn, win_reward=500, lose_reward=-1000,
              even_reward=-100, keepgoing_reward=-10):
    """Calculate the reward pair [player0, player1] for whoever just moved."""
    reward = [0, 0]
    if winGame(state[:, :, whose_turn]):
        reward[whose_turn] = win_reward
        reward[1 - whose_turn] = lose_reward
    elif fullGrid(state):
        reward = [even_reward, even_reward]
    else:
        reward[whose_turn] = keepgoing_reward
    return reward


def drawGrid(state):
    """Visualize the chessboard: 'O' for player 0, 'X' for player 1."""
    grid = np.zeros(state.shape[:2], dtype='<U2')
    grid[:] = ' '
    for i in range(state.shape[0]):
        for j in range(state.shape[1]):
            if (state[(i, j)] > 0).any():
                if (state[(i, j)] == 1).all():
                    # FIX: was a bare `raise` outside an except block, which
                    # only produces "No active exception to re-raise".
                    raise ValueError("both players occupy cell (%d, %d)" % (i, j))
                elif state[(i, j)][0] == 1:
                    grid[(i, j)] = 'O'
                else:
                    grid[(i, j)] = 'X'
    return grid


def displayGrid(grid):
    """Print out the chessboard with 1-based column numbers underneath."""
    wid = grid.shape[0]
    show_num = 9 if wid > 9 else wid

    # chessboard body: cells joined by " | ", rows separated by "- + " rules
    line = '\n' + '- + ' * (wid - 1) + '- {}\n'
    line = line.join([' | '.join(grid[i]) for i in range(wid)])

    # column numbers along the bottom
    bottom = ('\n' + ' {} ' * show_num)
    bottom = bottom.format(*[i + 1 for i in range(show_num)])
    if show_num == 9:
        part = (' {} ' * (wid - show_num))
        part = part.format(*[i + 1 for i in range(show_num, wid)])
        bottom += part

    print(line.format(*[i + 1 for i in range(wid)]) + bottom)


def try_display(width=19):
    """Smoke test: play random moves until someone wins or the board fills."""
    state, avai = initGame(width)
    terminate = False
    print('Start')
    for i in range(int(width ** 2 / 2)):
        for actor in [0, 1]:
            new_state = None
            # retry random cells until an empty one is found
            while new_state is None:
                x = np.random.randint(width)
                y = np.random.randint(width)
                move = (x, y)
                new_state, avai = makeMove(state, avai, move, actor)
            state = new_state
            reward = getReward(state, actor)
            if 500 in reward:
                print('\tterminal: %d\n' % i)
                terminate = True
                break
            elif -100 in reward:
                print('\tchessboard is full.\n')
                terminate = True
                break
        if terminate:
            break
    displayGrid(drawGrid(state))


def main():
    try_display(11)


if __name__ == '__main__':
    main()
997,685
4c5425f4ab34a5149d32008cab152123e051787d
# Competitive-programming solution: reads t test cases; each case is a
# `size` x ? integer grid, but only the first row is inspected.
t = int(input())
while (t > 0):
    size = int(input())
    arr = []
    for a in range(0, size):
        arr.append(list(map(int, input().split())))

    # index of the last position (1-based) where the first row deviates
    # from the identity sequence 1..size; -1 when it never deviates.
    index = -1
    for val in range(0, size):
        if (val + 1 != arr[0][val]):
            index = val + 1

    # check[val] counts positions up to `val` that still match the identity
    # (runs of consecutive values keep the previous count).
    # NOTE(review): when index == -1 this makes check = [0]*-1 == [] and the
    # loop below is skipped -- presumably intentional; confirm.
    check = [0] * index
    for val in range(1, index):
        if (val != 1 and arr[0][val] - arr[0][val - 1] == 1):
            check[val] = check[val - 1]
        elif (val + 1 == arr[0][val]):
            check[val] = check[val - 1] + 1
        else:
            check[val] = check[val - 1]

    # Each distinct non-zero run level costs 2 operations, level zero costs 1.
    a = set(check[1:])
    ans = 0
    if (index != -1):
        for val in a:
            if (val == 0):
                ans += 1
            else:
                ans += 2
    print(ans)
    t -= 1
997,686
3407328316395486c31e6fb5344c6b8f785bfcda
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: ScaleKent
"""Fit a T1 relaxation map from a folder of inversion-recovery DICOM slices.

For each pixel in a fixed region of interest, the signal across the DICOM
series is fitted to the inversion-recovery model |a * (1 - 2*exp(-TI/b))|,
where the fitted b is the T1 estimate.
"""

import os

import pydicom as dicom
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from PIL import Image

# Root directory of the DICOM files.
file_path = "C:\\Users\\ScaleKent\\Desktop\\MRI\\MRI_python\\dicom_file\\"
# Data-point coordinate x, y.
pixel_dot = [28, 30]
# Initial guess for the fit parameters a, b.
init_guess = [3000, 3000]

fin_mix_T1_map = np.zeros(shape=(128, 128), dtype=np.int64)
# New 8-bit grayscale image, same size as the T1 map.
img = Image.new("L", (128, 128))
pixel_num = 0          # number of fitted pixels
IN_pixel_sums_T1 = 0   # running sum of T1 values inside the ROI

for mix_col in range(53, 75, 1):       # x range of the ROI
    for mix_row in range(87, 107, 1):  # y range of the ROI
        # List every DICOM file under the series directory.
        os.chdir(file_path)
        for root, dirs, files in os.walk(file_path):
            print('file_path: %s' % (str(root)))
            print(files)

        # NOTE(review): the fit always samples `pixel_dot`, not
        # (mix_row, mix_col), so every ROI pixel gets the same T1 value --
        # confirm whether [i, j] should be [mix_row, mix_col] instead.
        # M_dataset stores the pixel intensity per file; M_x stores TI (ms).
        [i, j] = [pixel_dot[1], pixel_dot[0]]
        # NOTE(review): np.int is deprecated/removed in modern NumPy.
        M_dataset = np.zeros(shape=(len(files)), dtype=np.int)
        M_x = np.zeros(shape=(len(files)), dtype=np.int)

        # Read intensity and InversionTime out of every file in the series.
        print('')
        for fileloop in range(len(files)):
            dataset = dicom.read_file(files[fileloop])
            M_dataset[fileloop] = int(dataset.pixel_array[i, j])
            M_x[fileloop] = int(dataset.InversionTime)
            print('M_dataset: %d' % (dataset.pixel_array[i, j]))
            print('M_x: %d' % (dataset.InversionTime))
        print(M_dataset)
        print(M_x)
        print('')

        # Inversion-recovery model: |a * (1 - 2 * exp(-TI / b))|.
        def func_TI(x, a, b):
            return np.abs(a * (1 - 2 * np.exp(np.array(x) * (-1) / np.array(b))))

        x = M_x
        y = M_dataset

        # Non-linear least-squares fit.
        popt, pcov = curve_fit(func_TI, x, y, p0=init_guess)

        # Extract the fitted coefficients from popt.
        print(popt)
        a = popt[0]
        b = popt[1]  # b is the fitted T1

        yvals = func_TI(x, a, b)  # fitted y values

        # Print the fit results.
        print('popt:', popt)
        print('a:', a)
        print('b:', b)
        print('pcov:', pcov)
        print('yvals:', yvals)
        print('')
        print('T1:', b)
        print('')

        pixel_num = pixel_num + 1
        IN_pixel_sums_T1 = IN_pixel_sums_T1 + b
        fin_mix_T1_map[mix_row, mix_col] = int(b)
        img.putpixel([mix_row, mix_col], int(fin_mix_T1_map[i, j]))

print(fin_mix_T1_map)
# np.save("fin_mix_T1_map.npy", fin_mix_T1_map)
# np.savetxt("fin_mix_T1_map.txt",fin_mix_T1_map)

print('\npixel_num & IN_pixel_sums_T1')
print(pixel_num)
print(IN_pixel_sums_T1)
print('\nT1 ave')
print(IN_pixel_sums_T1 / pixel_num)
997,687
ee66e272bfe768b21958d618b2c2d0395788216b
# Pretty printed json Serialization
import json as simplejson

from django.core.serializers import json
from tastypie.serializers import Serializer


class PrettyJSONSerializer(Serializer):
    """Tastypie serializer that emits human-readable, indented JSON."""

    json_indent = 2  # spaces per indentation level

    def to_json(self, data, options=None):
        """Serialize `data` to a sorted, indented, ASCII-safe JSON string."""
        options = options or {}
        simple = self.to_simple(data, options)
        return simplejson.dumps(
            simple,
            cls=json.DjangoJSONEncoder,
            sort_keys=True,
            ensure_ascii=True,
            indent=self.json_indent,
        )
997,688
68cb3c3d3a6c548a8d174ca044542ef0718ac05a
"""Portuguese (pt-BR) UI string constants for the MSCollection movies and
series collection application.

Grouped by purpose: general vocabulary, window titles, labels, table headers,
buttons, menus, tool tips (HTML-formatted), and error messages. The string
VALUES are rendered verbatim in the UI -- do not alter them here without a
deliberate copy change.
"""

# --- General vocabulary (singular `_s` / plural `_p` variants) ---
box = 'Box'
category_s = 'Categoria'
category_p = 'Categorias'
cast_s = 'Elenco'
cast_p = 'Elencos'
character_s = 'Personagem'
character_p = 'Personagens'
collection_s = 'Coleção'
collection_p = 'Coleções'
creator_s = 'Criador'
creator_p = 'Criadores'
delete = 'excluir'
director_s = 'Diretor'
director_p = 'Diretores'
edit = 'Editar'
episode_s = 'Episódio'
episode_p = 'Episódios'
fill = 'Preencher'
franchise = 'Franquia'
general = 'Geral'
insert = 'Inserir'
keyword = 'Keyword'
media_s = 'Mídia'
media_p = 'Mídias'
movie_s = 'Filme'
movie_p = 'Filmes'
now = 'agora'
order = 'Ordem'
other_s = 'Outro'
other_p = 'Outros'
poster = 'Poster'
principal = 'Principal'
search = 'Pesquisar'
season_s = 'Temporada'
season_p = 'Temporadas'
select = 'Selecionar'
series_s = 'Série'
series_p = 'Séries'
star = 'star'
status = 'Status'
summary_s = 'Resumo'
summary_p = 'Resumos'
table = 'Tabela'
term_s = 'Termo'
term_p = 'Termos'
title_s = 'Título'
title_p = 'Títulos'
warning = 'Aviso'
year_s = 'Ano'
year_p = 'Anos'
progress = 'Progresso'
for_ = 'Por'
of_1 = 'de'
or_s = 'ou'
imdb = 'IMDB'
selected = 'selecione'
url = 'URL'
episode_cod = 'Código do Episódio'
ms = 'Minha Série'
with_the_p = 'com os'
original_title_s = 'Título Original'
original_title_p = 'Títulos Originais'
actor_s = 'Ator/Atriz'
actor_p = 'Atores/Atrizes'
category_1 = 'Categoria 1'
category_2 = 'Categoria 2'
actual_value = 'Valor Atual'
season_num = 'número da temporada'

# Windows Title
# NOTE(review): 'Inserie' below looks like a typo for 'Inserir' -- it is a
# user-facing string, so fix it as a deliberate copy change if intended.
main_widow = 'Coleção de Filmes e Séries'
insert_ok = 'Sucesso ao Inserie'
insert_error = 'Erro ao Inserir'
edit_ok = 'Sucesso ao Editar'
edit_error = 'Erro ao Editar'
delete_ok = 'Sucesso ao Excluir'
delete_error = 'Problemas ao Excluir'
db_error = 'Erro Com Banco De Dados'
delete_orphans = 'Excluir Órfãos'
select_movie = 'Selecionar Filmes'
select_series = 'Selecionar Séries'
help_insert_movie = 'Ajuda Inserir Filme'
help_insert_series = 'Ajuda Inserir Séries'
help_edit_movie = 'Ajuda Editar Filmes'
help_edit_series = 'Ajuda Editar Séries'
help_edit_cast = 'Ajuda Editar Elenco'
table_movie_categories = 'Tabela Filmes Categoria'
search_web_url = 'Pesquisar Web Url Po Título'
rewrite_html = 'Rewrite HTML'

# Labels
lb_search_imdb = 'Insira a URL do IMDB'
lb_search_ad = 'Insira a URL do Adoro Cinema'
lb_search_season_imdb = 'IMDB URL'
lb_search_season_ms = 'Minha Série URL'
lb_edit_select_movie = 'Selecione o Filme'
lb_edit_select_series = 'Selecione a Série'
# NOTE(review): '<span>' (unclosed) and '</hmtl>' below look like markup
# typos in the rendered string -- confirm before changing.
lb_no_edit_cast = '<html><head/><body><p>Elenco ' \
                  '<span style="font-size: 10pt;">' \
                  '(para editar elenco vá Editar->Filme->Elenco)<span>' \
                  '</p></body></hmtl>'
lb_others_directors = '<html><body>' \
                      '<p style="font-size: 12pt; line-height: 1.3;"> Inserir outros ' \
                      'diretores referentes a um filme.</p></body></html>'
lb_others_creator = '<html><body>' \
                    '<p style="font-size: 12pt; line-height: 1.3;"> Inserir outros ' \
                    'criadores referentes a uma série.</p></body></html>'
lb_view_select = "Selecione um Título"
lb_url = 'Web url'
lb_quantity = 'Quantidade'
lb_total = 'Total'
lb_episode_search = 'Pesquisar Episódio'
lb_select_series = 'Primeiro selecione a série'
lb_time = 'Tempo'
lb_search_url = 'Url da Pesquisa'

# Table Headers
code = 'Cód.'
last_edit = 'Última Edição'

# Buttons
pb_save = 'SALVAR'
pb_delete = 'EXCLUIR'
pb_clear = 'LIMPAR'
pb_leave = 'SAIR'
pb_select_all = 'SELECIONAR TUDO'
pb_help = 'AJUDA'
pb_search = 'PESQUISAR'
pb_imdb_search_confirm = 'Pesquisar no IMDB'
pb_ad_search_confirm = 'Pesquisar no Adoro Cinema'
pb_imdb_ad_search = 'Preencher Campos'
pb_search_imdb = 'Pesquisar Episódio IMDB'
pb_ms_search = 'MINHA SÉRIE'
pb_rewrite_html = 'REWRITE HTML'

# Menu
menu_movie_others = 'Filmes Outros'
menu_series_others = 'Séries Outros'
menu_others = 'Outros'
menu_search_media_year = 'Mídia/Ano'

# Tool Tip (HTML-formatted rich tool tips)
rb_star_tt = 'Marque para os principais atores.'
with_term_tt = 'Pesquisa com os termos que você digitar.'
with_title_term_tt = 'Buscar título com os termos:'
with_episode_term_tt = 'ou episódios com os termos:'
time_tt = 'Tempo de duração do filme.'
enter_required = '<html><head/><body>' \
                 '<p>Entre com o ano e pressione enter para pesquisar</p>' \
                 '</body></html>'
director_tt = '<html><head/><body>' \
              '<p>Para editar os diretores vá em Filmes Outros &rarr; ' \
              'Diretores</p></body></html>'
creator_tt = '<html><head/><body>' \
             '<p>Para editar os criadores vá em Séries Outros &rarr; ' \
             'Criadores</p></body></html>'
html_write = 'Não foi possível criar a página html.'
imdb_search_tt = '<html><head/><body>' \
                 '<p>Buscar vários items de um filme no ' \
                 '<span style=" font-weight:600;">IMDB</span> e já preencher ' \
                 'os campos com esses items.</p>' \
                 '<p>Para isso basta você acessar a página do filme no ' \
                 '<span style="font-weight:600;">IMDB copiar e colar a URL ' \
                 '</span>no campo fornecido.</p></body></html>'
ad_search_tt = '<html><head/><body>' \
               '<p>Buscar vários items de um filme no ' \
               '<span style=" font-weight:600;">Adoro Cinema</span> ' \
               'e já preencher os campos com esses items.</p>' \
               '<p>Para isso basta você acessar a página do filme no ' \
               '<span style="font-weight:600;">Adoro Cinema copiar e colar a URL ' \
               '</span>no campo fornecido.</p><p style="font-weight:600;">' \
               'Preste atenção aos resultados fornecidos eles nem sempre ' \
               'serão os valores esperados</p></body></html>'
pb_add_row_tt = '<html><body><p>Pressione o botão <span style="color: red;">+' \
                '</span> para adicionar mais uma linha a tabela.</p></body></html>'
ms_episode_search = "<html><head/><body>" \
                    "<p>No site Minha Série os episódios são mostrados em url " \
                    "separadas. Portanto temos que percorrer uma série de urls " \
                    "para buscar os valores desejados. O MSCollection pode " \
                    "percorrer essas urls por você.</p>" \
                    "<p>Para que o MSCollection percorra as urls nós " \
                    "decidimos iniciar a pela url do último episódio da " \
                    "temporada, portanto, busque a url do último episódio e " \
                    "insira aqui" \
                    "</p></body></html>"
season_num_tt = "<html><head/><body>" \
                "<p>O número de temporadas que você possui dessa série.</p>" \
                "<p>Se eles estiverem em uma sequencia pode inseri-los da " \
                "seguinte forma 1:4, outra forma seria separando eles por " \
                "virgula 1, 2, 4, 5.</p></body></html>"

# Error Message
no_title = 'O campo título e ano são obrigatório.'
no_series = 'Você precisa selecionar uma série.'
no_season = 'Você precisa selecionar uma temporada.'
no_director = 'O campo diretor precisa de um valor.'
no_movie = 'Você precisa selecionar um filme.'
no_creator = 'O campo criador precisa de um valor.'
# NOTE(review): the closing '>' of </html> is missing below -- confirm.
msg_db_conn = '<html><body><p style="color:red;">Erro ao tentar conectar com ' \
              'o banco de dados.</p></body></html'
no_year = 'O campo ano é obrigatório.'
cast_error = 'Não foi possível inserir o elenco '
series_exist = 'A série já foi inserida no banco de dados.'
movie_exist = 'O filme já foi inserida no banco de dados.'
# ---------------------------------------------------------------------------
# Message factories: each builds a small HTML snippet for a status dialog.
# Rewritten with f-strings; the returned strings are byte-identical to the
# old concatenation-based versions.
# ---------------------------------------------------------------------------
def msg_insert_ok(name):
    """HTML notice: *name* was inserted successfully."""
    return ('<html><body><p style="color:#000000;">'
            f'<span style="color:#3a90c7;">{name}</span>'
            ' foi inserido com sucesso.</p></body></html>')


def msg_insert_season_ok(name, num):
    """HTML notice: season *num* of *name* was inserted successfully."""
    return ('<html><body><p style="color:#000000;">'
            f'<span style="color:#3a90c7;">{name} {num}</span>'
            ' foi inserido com sucesso.</p></body></html>')


def msg_edit_season_ok(name, num):
    """HTML notice: season *num* of *name* was edited successfully."""
    return ('<html><body><p style="color:#000000;">'
            f'<span style="color:#3a90c7;">{name} {num}</span>'
            ' foi editado com sucesso.</p></body></html>')


# NOTE: the misspelled "epidsode" names below are kept — callers import them.
def msg_insert_epidsode_ok(name, num):
    """HTML notice: *num* episodes of *name* were inserted successfully."""
    return ('<html><body><p style="color:#000000;">'
            f'<span style="color:#3a90c7;">{name} {num}</span>'
            ' episódios inseridos com sucesso.</p></body></html>')


def msg_edit_epidsode_ok(name, num):
    """HTML notice: *num* episodes of *name* were edited successfully."""
    return ('<html><body><p style="color:#000000;">'
            f'<span style="color:#3a90c7;">{name} {num}</span>'
            ' episódios editados com sucesso.</p></body></html>')


def msg_insert_episode_error(name, num):
    """HTML error: episode *num* of *name* could not be inserted."""
    return ('<html><body><p style="color:#000000;">Não foi possível inserir '
            f'<span style="color:#3a90c7;">{name} {num}.</span>'
            '</p></body></html>')


def msg_edit_episode_error(name, num):
    """HTML error: episode *num* of *name* could not be edited."""
    return ('<html><body><p style="color:#000000;">Não foi possível editar '
            f'<span style="color:#3a90c7;">{name} {num}.</span>'
            '</p></body></html>')


def msg_insert_erro(name):
    """HTML error: *name* could not be inserted into the database."""
    return ('<html><body><p style="color:red;">Não foi possível inserir '
            f'<span style="color:#3a90c7;">{name}</span>'
            ' no banco de dados.</p></body></html>')


def msg_insert_season_error(name, num):
    """HTML error: season *num* of *name* could not be inserted."""
    # The leading space before "Erro" is kept from the original string.
    return ('<html><body><p style="color:#000000;"> Erro ao inserir '
            f'<span style="color:#3a90c7;">{name} {num}</span>'
            ' no banco de dados.</p></body></html>')


def msg_edit_season_error(name, num):
    """HTML error: season *num* of *name* could not be edited."""
    return ('<html><body><p style="color:#000000;"> Erro ao editar '
            f'<span style="color:#3a90c7;">{name} {num}</span>'
            ' no banco de dados.</p></body></html>')


def msg_error_insert_cast(a, c):
    """HTML error: actor *a* could not be inserted as character *c*."""
    return ('<html><body><p style="color:red;">Não foi possível inserir '
            f'<span style="color:#3a90c7;">{a} como {c}</span>'
            ' no banco de dados.</p></body></html>')


def msg_edit_ok(name):
    """HTML notice: *name* was edited successfully."""
    # 'style = ' (with spaces) is kept exactly as the original produced it.
    return ('<html><body><p style="color:#000000;">'
            f'<span style = "color:#3a90c7;">{name}</span>'
            ' foi editado com sucesso.</p></body></html>')


def msg_edit_erro(name):
    """HTML error: *name* could not be edited."""
    return ('<html><body><p style="color:red;">Não foi possível editar '
            f'<span style = "color:#3a90c7;">{name}</span>'
            '.</p></body></html>')


def msg_before_delete(name):
    """HTML confirmation shown before permanently deleting *name*."""
    return ('<html><body><p style="color:red;">Tem certeza que deseja excluir '
            f'<span style="color:#3a90c7;">{name}</span>'
            ' definitivamente do banco de dados.</p></body></html>')


def msg_delete_ok(name):
    """HTML notice: *name* was deleted successfully."""
    return ('<html><body><p style="color:#000000;">'
            f'<span style="color:#3a90c7;">{name}</span>'
            ' foi excluido com sucesso.</p></body></html>')


def msg_delete_erro(name):
    """HTML error: *name* could not be deleted."""
    return ('<html><body><p style="color:red;">Não foi possível excluir '
            f'<span style="color:#3a90c7;">{name}</span>'
            '.</p></body></html>')


def msg_delete_others(obj, name, references):
    """HTML warning listing the titles affected by deleting *name* (*obj*)."""
    # The doubled braces render a literal "{obj: name}" in the output.
    return ('<html><body><p style="color: red;">Atenção remover '
            f'<span style="color:#3a90c7;">{{{obj}: {name}}}</span>'
            ' não é recomendado!!!</p>'
            '<p style="color: red;">Os seguinte filmes e séries serão'
            ' afetados:</p>'
            f'<p style="color:#3a90c7;">{references}</p>'
            '<p style="color: red; font-weight: bold;">'
            'DESEJA REALMENTE CONTINUAR</p></body></html>')


def msg_insert_obj_error(name, obj):
    """HTML error: *name* could not be inserted into *obj*."""
    return ('<html><body><p style="color:red;">Não foi possível inserir '
            f'<span style="color:#3a90c7;">{name} em {obj}</span>'
            ' no banco de dados.</p></body></html>')


# IMDB genre translation table (English -> Portuguese).
# Do not delete these entries: they are used to translate the category
# names IMDB returns in English.
imdb_categories = {
    'Action': 'Ação',
    'Adult': 'Adulto',
    'Adventure': 'Aventura',
    'Animation': 'Animação',
    'Biography': 'Biografia',
    'Comedy': 'Comedia',
    'Crime': 'Crime',
    'Documentary': 'Documentário',
    'Drama': 'Drama',
    'Family': 'Família',
    'Fantasy': 'Fantasia',
    'Film Noir': 'Film Noir',
    'Game-Show': 'Game-Show',
    'History': 'Histórico',
    'Horror': 'Terror',
    'Musical': 'Musical',
    'Music': 'Música',
    'Mystery': 'Mistério',
    'News': 'Notícias',
    'Reality-TV': 'Reality-Show',
    'Romance': 'Romance',
    'Sci-Fi': 'Ficção Científica',
    'Short': 'Curta Metragem',
    'Sport': 'Esporte',
    'Superhero': 'Super-Herói',
    'Talk-Show': 'Talk-Show',
    'Thriller': 'Suspense',
    'War': 'Guerra',
    'Western': 'Faroeste',
}
997,689
2a7e2e3dd0927242a51e22d3245e76d2696c6ac1
from django.apps import AppConfig


class CodeAuditConfig(AppConfig):
    """Django AppConfig registering the ``code_audit`` application."""

    name = 'code_audit'
997,690
2dbb514e803e8232bd03d58b2dbae22b62ee8a3d
import html

from app_base import AppBase


class UwsgiHello(AppBase):
    """A sample hello world testbed.

    Echoes the ``message`` query-string parameter back as an HTML page.
    """

    def __init__(self, environ, start_response):
        AppBase.__init__(self, 'UwsgiHello', environ, start_response)

    def application(self):
        """Handle the request: return the escaped ``message`` parameter."""
        self.start_response("200 OK", [("Content-Type", "text/html")])
        message = self.qs_get('message', 'unknown')[0]
        self.log(3, 'raw message:' + str(message))
        # html.escape replaces cgi.escape, which was removed in Python 3.8.
        # quote=False matches cgi.escape's default (escapes only &, <, >).
        return [("hello there. You said &quot;" +
                 html.escape(message, quote=False) + "&quot;").encode()]
997,691
0e3e3d3878f3f5f1fa2cbf7dd2146be1e5841014
from tkinter import *
import cv2

# Main application window.
top = Tk()
top.title("Irregular motion detector")
top.geometry("1080x720")


def fun():
    # Capture from the default webcam, detect motion by frame differencing,
    # label the moving blob's approximate shape, and record annotated frames.
    cap = cv2.VideoCapture(0)
    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))    # NOTE(review): unused
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))  # NOTE(review): unused
    # NOTE(review): XVID fourcc with an .mp4 container may fail to encode on
    # some platforms — confirm the codec/container pairing.
    fourcc = cv2.VideoWriter_fourcc('X', 'V', 'I', 'D')
    out = cv2.VideoWriter("output.mp4", fourcc, 5.0, (1280, 720))
    # Two consecutive frames; motion = per-pixel difference between them.
    ret, frame1 = cap.read()
    ret, frame2 = cap.read()
    print(frame1.shape)
    while True:
        # Threshold the blurred grayscale frame difference to find motion.
        diff = cv2.absdiff(frame1, frame2)
        gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
        blur = cv2.GaussianBlur(gray, (5, 5), 0)
        _, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
        dilated = cv2.dilate(thresh, None, iterations=3)
        contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        for contour in contours:
            (x1, y1, w, h) = cv2.boundingRect(contour)
            if cv2.contourArea(contour) < 900:
                # Ignore small contours (camera noise).
                continue
            cv2.rectangle(frame1, (x1, y1), (x1 + w, y1 + h), (0, 255, 0), 2)
            cv2.putText(frame1, "Status: {}".format('Irregular motion'), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1)
            # Classify the moving blob's shape by its polygon approximation:
            # the vertex count of the simplified contour picks the label.
            approx = cv2.approxPolyDP(contour, 0.01 * cv2.arcLength(contour, True), True)
            cv2.drawContours(frame1, [approx], 0, (0, 0, 0), 1)
            x = approx.ravel()[0]
            y = approx.ravel()[1] - 5
            if len(approx) == 3:
                cv2.putText(frame1, "Triangle", (x, y), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 0))
            elif len(approx) == 4:
                # Distinguish square from rectangle by bounding-box aspect ratio.
                x1, y1, w, h = cv2.boundingRect(approx)
                aspectRatio = float(w) / h
                print(aspectRatio)
                if aspectRatio >= 0.95 and aspectRatio <= 1.05:
                    cv2.putText(frame1, "square", (x, y), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 0))
                else:
                    cv2.putText(frame1, "rectangle", (x, y), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 0))
            elif len(approx) == 5:
                cv2.putText(frame1, "Pentagon", (x, y), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 0))
            elif len(approx) == 10:
                cv2.putText(frame1, "Star", (x, y), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 0))
            else:
                cv2.putText(frame1, "Irregular Shape or Circle", (x, y), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 0))
        image = cv2.resize(frame1, (1280, 720))
        out.write(image)
        cv2.imshow("feed", frame1)
        # Advance the frame pair: current frame becomes the previous one.
        frame1 = frame2
        ret, frame2 = cap.read()
        k = cv2.waitKey(1)
        if k == ord('q'):
            # Press 'q' in the video window to stop capture.
            break
    cv2.destroyAllWindows()
    cap.release()
    out.release()


# NOTE(review): Label(...).place(...) returns None, so these variables hold
# None — harmless here, but the assignments are misleading.
uname = Label(top, text = "DETECT IRREGULAR MOVING OBJECT AND TRACKING BASED ON COLOUR AND SHAPE IN REAL-TIME SYSTEM").place(x = 250,y = 50)
uname3 = Label(top, text = "MADE FOR GRAPHIC ERA HILL UNIVERSITY 4TH SEM MINI-PROJECT").place(x = 250,y = 100)
b1 = Button(top, text="START THE PROGRAM", command=fun, activeforeground="red", activebackground="pink", pady=10)
uname2 = Label(top, text = "Created By Abhishek Chauhan").place(x = 600,y = 600)
b1.place(relx=0.5, rely=0.5, anchor=CENTER)
top.mainloop()
997,692
59f262832900a39ac7c97ff1b33b2360a017d63f
import numpy as np
from chainer.utils.conv import col2im_cpu, get_deconv_outsize
from test.util import generate_kernel_test_case, wrap_template
from webdnn.graph.axis import Axis
from webdnn.graph.graph import Graph
from webdnn.graph.operators.col2im import Col2Im
from webdnn.graph.order import OrderNHWC, OrderNCHW, Order
from webdnn.graph.variable import Variable

# Axis order that chainer's col2im_cpu reference implementation expects.
col_chainer_order = Order([Axis.N, Axis.C, Axis.KH, Axis.KW, Axis.H, Axis.W])


@wrap_template
def template(col_shape=(2, 5, 3, 3, 3, 4), col_order=col_chainer_order, im_order=OrderNHWC, ksize=(3, 3), padding=(1, 1), stride=(1, 1), description: str = ""):
    """Build a Col2Im kernel test case and check it against chainer's
    col2im_cpu reference output for the given column/image axis orders.
    """
    col = Variable(col_shape, col_order)
    op = Col2Im(None, ksize, stride, padding)
    im, = op(col)
    im = im.change_order(im_order)

    # Reference computation in chainer's canonical layout.
    vcol = np.random.rand(*(col.shape_dict[a] for a in col_chainer_order.axes)).astype(np.float32)
    h1 = get_deconv_outsize(col.shape_dict[Axis.H], op.KH, op.SH, op.PH)
    w1 = get_deconv_outsize(col.shape_dict[Axis.W], op.KW, op.SW, op.PW)
    vim = col2im_cpu(vcol, op.SH, op.SW, op.PH, op.PW, h1, w1)

    # Permute reference arrays into the orders under test.
    vcol = vcol.transpose([col_chainer_order.axes_dict[a] for a in col_order.axes])
    vim = vim.transpose([OrderNCHW.axes_dict[a] for a in im_order.axes])

    generate_kernel_test_case(
        description=f"Col2Im {description}",
        backend=["webgpu", "webgl", "webassembly"],
        graph=Graph([col], [im]),
        inputs={col: vcol},
        expected={im: vim},
    )


def test_NHWC():
    template()


def test_NCHW():
    # NOTE(review): this passes the exact same axis order as the default
    # `col_chainer_order`, so it duplicates test_NHWC's column layout rather
    # than exercising a different one — confirm the intended order.
    template(col_order=Order([Axis.N, Axis.C, Axis.KH, Axis.KW, Axis.H, Axis.W]), col_shape=(2, 5, 3, 3, 3, 4))


def test_wide_stride():
    template(ksize=3, stride=2, padding=1)


def test_no_padding():
    template(padding=0)
997,693
afb15e0b535a761eda6513e01409ab5f5d5664bb
from pathlib import Path

import xarray as xr
import numpy as np

from .utils import make_sentinel_dataset_name, make_hrsl_dataset_name, load_sentinel

from typing import cast, Tuple, Union


class Engineer:
    """Slices matched HRSL / Sentinel rasters into fixed-size training tiles.

    Expects ``data_dir`` to contain the ``raw`` and ``processed`` folders
    produced by the earlier pipeline stages; tiles are written under
    ``<data_dir>/features/{with,without}_buildings/{training,validation}/``.
    """

    def __init__(self, data_dir: Path) -> None:
        self.raw_folder = data_dir / "raw"
        self.processed_folder = data_dir / "processed"
        self.output_folder = data_dir / "features"
        self.output_folder.mkdir(exist_ok=True)

        # Tiles are partitioned by whether they contain any buildings.
        self.without_buildings = self.output_folder / "without_buildings"
        self.with_buildings = self.output_folder / "with_buildings"

        self.without_buildings.mkdir(exist_ok=True)
        self.with_buildings.mkdir(exist_ok=True)

    def _check_necessary_files_exist(
        self, country_code: str
    ) -> Tuple[Path, Path, bool]:
        """Return ``(sentinel_path, hrsl_path, both_exist)`` for *country_code*."""
        hrsl_data = self.processed_folder / make_hrsl_dataset_name(country_code)
        sentinel_data = self.raw_folder / make_sentinel_dataset_name(country_code)
        return sentinel_data, hrsl_data, (hrsl_data.exists() and sentinel_data.exists())

    @staticmethod
    def sentinel_to_numpy(ds: xr.Dataset) -> np.ndarray:
        r"""Return a sentinel ds prepared by the preprocessor as a numpy array
        with dimensions [lat, lon, channels]
        """
        array = ds.transpose("lat", "lon", "band").to_array().values
        return np.squeeze(array, 0)

    @staticmethod
    def hrsl_to_numpy(ds: xr.Dataset) -> np.ndarray:
        r"""Return a hrsl ds prepared by the preprocessor as a numpy array
        with dimensions [lat, lon]
        """
        array = ds.transpose("lat", "lon").to_array().values
        return np.squeeze(array, 0)

    def process_single_filepair(
        self,
        country_code: str,
        hrsl_filepath: Path,
        sentinel_filepath: Path,
        imsize: Union[int, Tuple[int, int]],
        val_ratio: float,
    ) -> None:
        """Tile one HRSL/Sentinel file pair into ``imsize`` patches.

        Patches whose Sentinel data contain NaNs are skipped. ``imsize`` may
        be a single int (square patches) or a ``(lat, lon)`` pair.
        """
        if isinstance(imsize, int):
            imsize = (imsize, imsize)
        lat_imsize, lon_imsize = cast(Tuple, imsize)

        hrsl_ds = xr.open_dataset(hrsl_filepath)
        sentinel_ds = load_sentinel(sentinel_filepath)
        filename = hrsl_filepath.name

        # The two rasters must already be aligned on the same grid.
        assert hrsl_ds.lat.size == sentinel_ds.lat.size
        assert hrsl_ds.lon.size == sentinel_ds.lon.size

        skipped_files = counter = cur_lat = 0
        max_lat, max_lon = hrsl_ds.lat.size, hrsl_ds.lon.size
        # Walk a non-overlapping grid of (lat_imsize, lon_imsize) windows.
        while (cur_lat + lat_imsize) <= max_lat:
            cur_lon = 0
            while (cur_lon + lon_imsize) <= max_lon:
                hrsl_slice = self.hrsl_to_numpy(
                    hrsl_ds.isel(
                        lat=slice(cur_lat, cur_lat + lat_imsize),
                        lon=slice(cur_lon, cur_lon + lon_imsize),
                    )
                )
                sentinel_slice = self.sentinel_to_numpy(
                    sentinel_ds.isel(
                        lat=slice(cur_lat, cur_lat + lat_imsize),
                        lon=slice(cur_lon, cur_lon + lon_imsize),
                    )
                )
                if not np.isnan(sentinel_slice).any():
                    self.save_arrays(
                        hrsl_slice,
                        sentinel_slice,
                        country_code,
                        filename,
                        counter,
                        val_ratio,
                    )
                    counter += 1
                else:
                    skipped_files += 1
                cur_lon += lon_imsize
            cur_lat += lat_imsize
        print(f"Saved {counter} files, skipped {skipped_files}")

    def save_arrays(
        self,
        hrsl_slice: np.ndarray,
        sentinel_slice: np.ndarray,
        country_code: str,
        filename: str,
        file_idx: int,
        val_ratio: float,
    ) -> None:
        """Persist one (x, y) tile pair, assigning it to train or validation."""
        is_val = np.random.uniform() <= val_ratio
        if is_val:
            data_subset = "validation"
        else:
            data_subset = "training"

        # Bug fix: the folder name previously hard-coded the placeholder
        # "(unknown)" and silently ignored the `filename` argument, so tiles
        # coming from different rasters of the same country could collide.
        foldername = f"{country_code}_{filename}_{file_idx}"

        contains_buildings = hrsl_slice.max() >= 1
        if contains_buildings:
            pair_folder = self.with_buildings / data_subset / foldername
            pair_folder.mkdir(exist_ok=True, parents=True)
        else:
            pair_folder = self.without_buildings / data_subset / foldername
            pair_folder.mkdir(exist_ok=True, parents=True)

        if contains_buildings:
            # otherwise, this is just an array of 0s - no point
            # in saving it
            np.save(pair_folder / "y.npy", hrsl_slice)
        np.save(pair_folder / "x.npy", sentinel_slice)
        print(f"Saved {foldername}")

    def process_country(
        self,
        country_code: str,
        imsize: Union[int, Tuple[int, int]] = 224,
        val_ratio: float = 0.2,
    ) -> None:
        """Tile every ``.tif`` file pair available for *country_code*."""
        sentinel_folder, hrsl_folder, files_exist = self._check_necessary_files_exist(
            country_code
        )
        if not files_exist:
            print(f"Missing folders for {country_code}! Skipping")
            return None
        for sentinel_file in sentinel_folder.glob("**/*"):
            if sentinel_file.name.endswith(".tif"):
                hrsl_file = hrsl_folder / sentinel_file.name
                assert hrsl_file.exists()
                print(f"Processing {sentinel_file.name}")
                self.process_single_filepair(
                    country_code,
                    hrsl_file,
                    sentinel_file,
                    imsize=imsize,
                    val_ratio=val_ratio,
                )
            else:
                print(f"Skipping {sentinel_file.name}")
997,694
ac24a257564a2bafe0a44975b4a2bbe258314a42
from app import db
from .base import BaseModel


class WritingBaseModel(BaseModel):
    """Shared base for models that carry a piece of writing (title + text)."""

    # Short display title, up to 120 characters.
    title = db.Column(db.String(120))
    # Full body text of the writing.
    text = db.Column(db.Text)
997,695
365ed16675ff00e1b1c7488ed0ce375cc871babb
# -*- coding: utf8 -*- """ Dialogflow plugin ocnfiguration Author: Romary Dupuis <romary@me.com> """ from lifoid.config import Configuration, environ_setting class DialogflowConfiguration(Configuration): """ Configuration for the web server to run an admin UI. """ access_token = environ_setting('DIALOGF_ACCESS_TOKEN', '', required=True) dev_access_token = environ_setting('DIALOGF_DEV_ACCESS_TOKEN', '', required=False)
997,696
ac472a886e57e9d55aa61db7b9adb02db9af022b
import copy  # for deepcopy function


class Matrix(object):
    '''Base class for the 2x2 and 3x3 matrices used in this project.

    Matrices are stored 1-indexed: ``A[i][j]`` is row i, column j, and index
    0 of every list is ``None`` padding.  Serves as the parent class of
    ``twoBytwo`` and ``threeBythree``.
    '''

    def __init__(self, A, order):
        # Deep-copy so the caller's nested lists are never aliased.
        self.A = copy.deepcopy(A)
        self.order = order

    def __str__(self):
        # Render the matrix as an HTML <table> for display on the webpage.
        returned_string = "<table>"
        for i in range(1, self.order + 1):
            returned_string += "<tr>"
            for j in range(1, self.order + 1):
                returned_string += "<td>" + str(self.A[i][j]) + "</td>"
            returned_string += "</tr>"
        returned_string += '</table>'
        return returned_string

    def transpose(self):
        # Return a new Matrix with rows and columns swapped.
        inverted = copy.deepcopy(self.A)
        for i in range(1, self.order + 1):
            for j in range(1, self.order + 1):
                inverted[j][i] = self.A[i][j]
        return Matrix(inverted, self.order)

    def multiply(self, other):
        # Return the product of two square matrices of the same order.
        # (Rewritten: the old version seeded the accumulator with A[i][j]
        # and subtracted it back at the end; summing from zero is clearer
        # and produces the same result.)
        multi = copy.deepcopy(self.A)
        for i in range(1, self.order + 1):
            for j in range(1, self.order + 1):
                total = 0
                for k in range(1, self.order + 1):
                    total += self.A[i][k] * other.A[k][j]
                multi[i][j] = total
        return Matrix(multi, self.order)


class twoBytwo(Matrix):
    '''2x2 matrix.  Inherits construction, printing, transpose and
    multiplication from ``Matrix``; adds determinant and inverse.
    '''

    def determinant(self):
        # ad - bc for [[a, b], [c, d]].
        B = self.A
        ans = (B[1][1] * B[2][2]) - (B[2][1] * B[1][2])
        return ans

    def inverse(self):
        # Classical adjugate formula; entries rounded to 3 decimals.
        # Returns the string "Inverse does not exist." for singular matrices.
        det = self.determinant()
        if det == 0:
            return "Inverse does not exist."
        adj = copy.deepcopy(self.A)
        adj[1][1] = round(self.A[2][2] / float(det), 3)
        adj[2][2] = round(self.A[1][1] / float(det), 3)
        adj[1][2] = round(-adj[1][2] / float(det), 3)
        adj[2][1] = round(-adj[2][1] / float(det), 3)
        return Matrix(adj, 2)


class threeBythree(Matrix):
    '''3x3 matrix.  Inherits construction, printing, transpose and
    multiplication from ``Matrix``; adds determinant and inverse.
    '''

    def determinant(self):
        # Laplace expansion along the first row using 2x2 minors.
        B = self.A
        ans = 0
        for x in range(1, self.order + 1):
            # Collect the 2x2 minor obtained by deleting row 1 and column x.
            z = []
            for i in range(2, self.order + 1):
                for j in range(1, self.order + 1):
                    if j == x:
                        pass
                    else:
                        z.append(B[i][j])
            Z = [None, [None, z[0], z[1]], [None, z[2], z[3]]]
            twoBytwoSub = twoBytwo(copy.deepcopy(Z), 2)
            subDeterminant = twoBytwoSub.determinant()
            # Alternate signs across the expansion row.
            if x % 2 == 0:
                ans += -B[1][x] * subDeterminant
            else:
                ans += B[1][x] * subDeterminant
        return ans

    def inverse(self):
        # Cofactor-matrix method: adjugate (transposed cofactors) / det.
        # Returns the string "Inverse does not exist." for singular matrices.
        B = self.A
        cofactors = copy.deepcopy(self.A)
        coFactorElem = 0
        mainDeterminant = self.determinant()
        if mainDeterminant == 0:
            return "Inverse does not exist."
        for x in range(1, 4):
            for y in range(1, 4):
                # Minor of element (x, y): delete row x and column y.
                z = []
                for i in range(1, 4):
                    for j in range(1, 4):
                        if (x == i) or (y == j):
                            pass
                        else:
                            z.append(B[i][j])
                Z = [None, [None, z[0], z[1]], [None, z[2], z[3]]]
                twoBytwoSub = twoBytwo(copy.deepcopy(Z), 2)
                subDeterminant = twoBytwoSub.determinant()
                if ((x + y) % 2 == 0):
                    coFactorElem = subDeterminant
                else:
                    coFactorElem = -1 * subDeterminant
                cofactors[x][y] = coFactorElem
        cofactorsMatrix = threeBythree(cofactors, 3)
        adjoint = cofactorsMatrix.transpose()
        for i in range(1, 4):
            for j in range(1, 4):
                adjoint.A[i][j] = round(adjoint.A[i][j] / float(mainDeterminant), 3)
        return adjoint


if __name__ == '__main__':
    # Ad-hoc smoke test only; this module is imported by app.py for its
    # main use in the webapp.
    order = 2
    if order == 2:
        A = [None, [None, 1, 2], [None, 3, 4]]
        our_matrix = twoBytwo(copy.deepcopy(A), order)
        x = our_matrix.inverse()
        # Fixed: was the Python-2-only `print x`; parentheses work on 2 and 3.
        print(x)
    if order == 3:
        A = [None, [None, 1, 2, 3], [None, 4, 5, 6], [None, 7, 6, 8]]
        matrixA = threeBythree(copy.deepcopy(A), order)
        x = matrixA.inverse()
        print(x)
997,697
39b94bd03467495e065f8c8f40647d6a63d7dafa
def format_hms(total_seconds):
    """Return *total_seconds* formatted as zero-padded ``HH:MM:SS``.

    Hours may exceed 24 — they are not wrapped into days.
    """
    hours, remainder = divmod(total_seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    # Bug fix: the old format zero-padded only the hours and left spaces
    # after the colons (e.g. "01: 1: 1"); pad all three fields uniformly.
    return f"{hours:02d}:{minutes:02d}:{seconds:02d}"


if __name__ == '__main__':
    # Guarded so the interactive prompt does not fire on import.
    # Prompt is Russian for "Enter seconds:" (typo "секуны" fixed).
    user_sec = int(input("Введите секунды: "))
    print(format_hms(user_sec))
997,698
f4158be88f433b4ecc6f3afae17d5182502539ec
from qqq.ccc import n


def add3(param):
    """Return *param* plus 3.

    NOTE(review): the imported name ``n`` is unused in this module — confirm
    nothing re-exports it from here before removing the import.
    """
    # Removed leftover debug print("Hello") that polluted stdout on every call.
    return param + 3
997,699
75b5c58349b159d8e5a32c3ecd4fb09db3d00597
""" Check if it is a triangle """ t = int(input()) while(t>0): a,b,c = [int(x) for x in input().split()] if a + b <= c or a + c <= b or b + c <= a: print("NO") else: print("YES") t-=1