row_id
int64
0
48.4k
init_message
stringlengths
1
342k
conversation_hash
stringlengths
32
32
scores
dict
37,145
what is lossless image compression
7a5801047278ea5bcc039abc7625bcf1
{ "intermediate": 0.208431676030159, "beginner": 0.18589553236961365, "expert": 0.6056727766990662 }
37,146
print all attributes and functions in python
11c6bc8ffd9af7646eb008dcab45bd97
{ "intermediate": 0.269220232963562, "beginner": 0.573676347732544, "expert": 0.15710341930389404 }
37,147
complete this code for revit api to test: public void MoveColumn(Autodesk.Revit.DB.Document document, FamilyInstance column) { // get the column current location LocationPoint columnLocation = column.Location as LocationPoint; XYZ oldPlace = columnLocation.Point; // Move the column to new location. XYZ newPlace = new XYZ(10, 20, 30); ElementTransformUtils.MoveElement(document, column.Id, newPlace); // now get the column's new location columnLocation = column.Location as LocationPoint; XYZ newActual = columnLocation.Point; string info = "Original Z location: " + oldPlace.Z + "\nNew Z location: " + newActual.Z; TaskDialog.Show("Revit",info); }
cedd162424253e3e967f59518c8026dc
{ "intermediate": 0.46160009503364563, "beginner": 0.2687785029411316, "expert": 0.26962146162986755 }
37,148
Is it possible to chat with bing chat for free?
9dc663f338e62b81da845d6769549020
{ "intermediate": 0.33658406138420105, "beginner": 0.21682949364185333, "expert": 0.4465864896774292 }
37,149
In Unity, how to access shader property and change it through script
06a52b3d80e3fb8da4ca533b2f41d414
{ "intermediate": 0.39498940110206604, "beginner": 0.3614630401134491, "expert": 0.24354754388332367 }
37,150
In revit api how to get wall from element id instead of Wall wall = new FilteredElementCollector(doc)
be72d35d544a6de2ea3322e8be5f84eb
{ "intermediate": 0.7552836537361145, "beginner": 0.09782055020332336, "expert": 0.14689576625823975 }
37,151
Implement this code inside httplistner inside revit when get request is invoked from outside: [Transaction(TransactionMode.Manual)] [Regeneration(RegenerationOption.Manual)] public class WallMover : IExternalCommand { public Result Execute( ExternalCommandData commandData, ref string message, ElementSet elements) { UIApplication uiApp = commandData.Application; Document doc = uiApp.ActiveUIDocument.Document; // In a real application, you would get the wall by selection or some other method. // For the example purpose, we are fetching the first wall we find. //ElementId wallId = 1597039; // Replace with your known ElementId //Wall wall = doc.GetElement(wallId) as Wall; int elementIdToFind = 1597039; ElementId elementId = new ElementId(Convert.ToInt64(elementIdToFind)); Wall wall = doc.GetElement(elementId) as Wall; //Wall wall = new FilteredElementCollector(doc).OfCategory(BuiltInCategory.OST_Walls).WhereElementIsNotElementType().Cast<Wall>().FirstOrDefault(); if (wall == null) { message = "No wall found."; return Result.Failed; } bool result = MoveUsingLocationCurve(doc, wall); if (result) { return Result.Succeeded; } else { message = "Wall could not be moved."; return Result.Failed; } } private bool MoveUsingLocationCurve(Document doc, Wall wall) { // Check if the wall has a valid location curve LocationCurve wallLocationCurve = wall.Location as LocationCurve; if (wallLocationCurve == null) { return false; } XYZ translationVec = new XYZ(10, 20, 0); // Start a new transaction for modifying the document using (Transaction trans = new Transaction(doc, "Move Wall")) { trans.Start(); try { // Perform the move operation wallLocationCurve.Move(translationVec); trans.Commit(); return true; } catch { // Abort the transaction in case of any errors trans.RollBack(); return false; } } } }
20c70ae562fb953ab8eb04b427ff80c9
{ "intermediate": 0.34978488087654114, "beginner": 0.37009167671203613, "expert": 0.28012341260910034 }
37,152
dont allow subclasses to define constructors java
a22cc7f25e5ef423af5f3ee5c628b1be
{ "intermediate": 0.3351244628429413, "beginner": 0.5004855990409851, "expert": 0.16438989341259003 }
37,153
I am writing a greasemonkey(or tampermonkey) script for a page that you type in a prompt, click generate, it shows a progress bar then the image pops up, or an error message pops up, the entire page does not refresh(under normal circumstances) How could I potentially get any of the loading "events" on the page? For now I just want to get everything, and print to the console any actual load events, logging them in such a way to be minimal and if a particular event happens too quickly(more than once a second) it will not log it again during that one second. And finally, I want to be able to know if the entire page refreshes, I assume the greasemonkey script might reload, or "init", but sometimes the website redirects, logging me out of my account, I want to be able to detect and log that as well, not just when I hit F5 to refresh the page myself.
3f5a0f9628b0bbe441e911b623d18b54
{ "intermediate": 0.6421409249305725, "beginner": 0.17258433997631073, "expert": 0.18527479469776154 }
37,154
In revit I want to start http listner and when there is get request with element id, want to move wall with this code, please write complete code to implement it. here is my move wall code [Transaction(TransactionMode.Manual)] [Regeneration(RegenerationOption.Manual)] public class WallMover : IExternalCommand { public Result Execute( ExternalCommandData commandData, ref string message, ElementSet elements) { UIApplication uiApp = commandData.Application; Document doc = uiApp.ActiveUIDocument.Document; // In a real application, you would get the wall by selection or some other method. // For the example purpose, we are fetching the first wall we find. //ElementId wallId = 1597039; // Replace with your known ElementId //Wall wall = doc.GetElement(wallId) as Wall; int elementIdToFind = 1597039; ElementId elementId = new ElementId(Convert.ToInt64(elementIdToFind)); Wall wall = doc.GetElement(elementId) as Wall; //Wall wall = new FilteredElementCollector(doc).OfCategory(BuiltInCategory.OST_Walls).WhereElementIsNotElementType().Cast<Wall>().FirstOrDefault(); if (wall == null) { message = "No wall found."; return Result.Failed; } bool result = MoveUsingLocationCurve(doc, wall); if (result) { return Result.Succeeded; } else { message = "Wall could not be moved."; return Result.Failed; } } private bool MoveUsingLocationCurve(Document doc, Wall wall) { // Check if the wall has a valid location curve LocationCurve wallLocationCurve = wall.Location as LocationCurve; if (wallLocationCurve == null) { return false; } XYZ translationVec = new XYZ(10, 20, 0); // Start a new transaction for modifying the document using (Transaction trans = new Transaction(doc, "Move Wall")) { trans.Start(); try { // Perform the move operation wallLocationCurve.Move(translationVec); trans.Commit(); return true; } catch { // Abort the transaction in case of any errors trans.RollBack(); return false; } } } }
5550af634840e3011f116f936a33d7a8
{ "intermediate": 0.3540850579738617, "beginner": 0.3674536347389221, "expert": 0.27846139669418335 }
37,155
Consumer that returns nothing in java
9d17ec1a30528851b7534af181d7fa31
{ "intermediate": 0.31761929392814636, "beginner": 0.4522164762020111, "expert": 0.2301642894744873 }
37,156
I use consumers. I just want the user to be able to receive a callback when completed when calling a method. Like this; method((object) -> {});
ce1bd9fc047c09da818f49e741e87c4e
{ "intermediate": 0.3822607398033142, "beginner": 0.3294840157032013, "expert": 0.2882552146911621 }
37,157
Java. "I use consumers. I just want the user to be able to receive a callback when completed when calling a method. Like this; method((object) -> {});"
5461a76df9a7b5e6587439b211d655a3
{ "intermediate": 0.5711390376091003, "beginner": 0.2339164912700653, "expert": 0.19494445621967316 }
37,158
reprase it...I am analysing the current code structure for the push notification process and investigating how to add Feign client configuration dependency into the Beam batch service
e21fd04d1a3549aa138e69ba95999238
{ "intermediate": 0.499702125787735, "beginner": 0.16528035700321198, "expert": 0.33501750230789185 }
37,159
hi
8e8533749af37965d32fe04a35ae1cd1
{ "intermediate": 0.3246487081050873, "beginner": 0.27135494351387024, "expert": 0.40399640798568726 }
37,160
Correct 'scope' usage in code snippet
1d4a9e7f57310a511db11c5a3ceb646c
{ "intermediate": 0.3007506728172302, "beginner": 0.47141239047050476, "expert": 0.22783701121807098 }
37,161
Correct 'subnetwork' usage in class EnmCli. Unresolved reference 'subnetworks' error
f4c7fd981d22aea0fdf1f39797c60689
{ "intermediate": 0.36179119348526, "beginner": 0.38685357570648193, "expert": 0.25135523080825806 }
37,162
it doens't follow the cursor: <script lang="tsx"> import { defineComponent, onMounted, ref } from '@vue/composition-api'; const DuckCursorChaser = defineComponent({ setup() { const duckStyle = ref({ top: '0px', left: '0px' }); const duckImgUrl = 'images/logo-dark.png'; // Update this with your duck image path const handleMouseMove = (event: MouseEvent) => { // Update the duck's position, make sure to add some offset if needed so it follows the cursor nicely duckStyle.value = { top: `${event.clientY - 20}px`, // Corrected syntax left: `${event.clientX - 20}px`, // Corrected syntax }; }; onMounted(() => { window.addEventListener('mousemove', handleMouseMove); }); // Optionally, remember to remove the event listener when the component is destroyed // to avoid memory leaks onBeforeUnmount(() => { window.removeEventListener('mousemove', handleMouseMove); }); return { duckStyle, duckImgUrl, }; }, render() { const { duckStyle, duckImgUrl } = this; return <img src={duckImgUrl} style={duckStyle.value} class="duck-cursor-chaser" alt="Duck Chaser" />; }, }); export default DuckCursorChaser; </script> <style scoped> .duck-cursor-chaser { position: absolute; pointer-events: none; transition: top 0.1s, left 0.1s; z-index: 1000; } </style> <Dialog v-model:visible="infoAvProduktVisable" header="Information" :style="{ width: '70%' }" :modal="true"> <div style="text-align: center;"> <label for="name" style="display: inline-block; margin-bottom: 10px; font-weight: bold;">Senaste redigering/tillagd av: <span style="font-weight: normal;">{{ temporary_info[0] }}</span></label> </div> <div style="text-align: center;"> <label for="latill" style="display: inline-block; margin-bottom: 10px; font-weight: bold;">Lades till: <span style="font-weight: normal;">{{ temporary_info[1] }}</span></label> </div> <div style="text-align: center;"> <label for="såldes" style="display: inline-block; margin-bottom: 10px; font-weight: bold;">Såldes: <span style="font-weight: normal;">{{ temporary_info[2] 
}}</span></label> </div> <div style="text-align: center;"> <Button label="Stäng" style="display: inline-block; margin-bottom: 10px; font-weight: bold;" @click="infoAvProduktVisable = false" /> </div> </Dialog> <DuckCursorChaser />
17ed3d4a94c4a804109e08b6f56a0620
{ "intermediate": 0.2883540987968445, "beginner": 0.4493701159954071, "expert": 0.26227572560310364 }
37,163
Change code using for loop instead of scope=';'.join(subnetworks) were subnetworks is a list of strings
c79a4523573127af1e9ad2dc75508251
{ "intermediate": 0.2495080977678299, "beginner": 0.6282453536987305, "expert": 0.12224651873111725 }
37,164
Write a python code to know the current language used on the computer.
ac2fe664082fad3dc8adbe56212aead7
{ "intermediate": 0.24026212096214294, "beginner": 0.4461725950241089, "expert": 0.31356528401374817 }
37,165
class ImageViewer: def __init__(self, root): self.SUPPORTED_EXTENSIONS = (".png", ".jpg", ".jpeg", ".gif", ".bmp", ".JPG", ".JPEG", ".BMP", ".GIF", ".webp", ".WEBP") self.button_style = {"font": ("consolas", 10), "fg": "white", "bg": "#3c3c3c", "relief": "flat"} self.image_loading_queue = Queue(maxsize=5) self.switch_timestamps = [] self.root = root self.root.geometry("800x600") # Initial window size self.root.title("Jestur Viewer") self.root.configure(bg="#808080") # Set window background color self.root.attributes("-topmost", True) # Make the window stay on top self.image_folder = "" self.image_files = [] self.current_image_index = 0 self.main_frame = tk.Frame(self.root, bg="#808080") self.main_frame.pack() self.select_folder_button = tk.Button(self.main_frame, text="Select Folder", command=self.select_folder) self.select_folder_button.configure(**self.button_style) self.select_folder_button.pack(side=tk.LEFT, padx=5, pady=10) self.set_timer_button = tk.Button(self.main_frame, text="Timer", command=self.set_timer_interval) self.set_timer_button.configure(**self.button_style) self.set_timer_button.pack(side=tk.LEFT, padx=5, pady=10) self.start_button = tk.Button(self.main_frame, text="Start", command=self.start_pause_slideshow) self.start_button.configure(font=self.button_style["font"], bg="#909090", fg="#707070", relief=tk.FLAT) self.start_button.pack(side=tk.LEFT, padx=5, pady=10) self.mirror_button = tk.Button(self.main_frame, text=">|<", command=self.toggle_mirror_image) self.mirror_button.configure(**self.button_style) self.mirror_button.pack(side=tk.LEFT, padx=5, pady=10) self.greyscale_button = tk.Button(self.main_frame, text="B/W", command=self.toggle_greyscale) self.greyscale_button.configure(**self.button_style) self.greyscale_button.pack(side=tk.LEFT, padx=5, pady=10) self.random_next_image_var = tk.BooleanVar(value=False) # Checkbox variable self.random_next_image_checkbox = tk.Checkbutton(self.main_frame, text="Random", 
variable=self.random_next_image_var, command=self.toggle_colors) self.random_next_image_checkbox.configure(**self.button_style, bd=2) self.random_next_image_checkbox.pack(side=tk.LEFT, padx=5, pady=10) #image label self.canvas = tk.Canvas(self.root) self.is_mirrored = False self.is_greyscale = False self.is_paused = False self.timer_label = tk.Label(self.root, text="5", **self.button_style, anchor="ne") #fg="white", bg="black", font=("arial", 12), bd=1, relief=tk.RIDGE) self.timer_label.configure(font=("consolas",17)) self.timer_label.place(relx=1, anchor="ne", x=-0, y=0) self.timer_interval = 5000 # Default time interval in milliseconds (5 seconds) self.set_timer_interval = 5000 # Store the set timer interval self.timer = None # Timer ID self.window_size = (self.root.winfo_width(), self.root.winfo_height()) self.resize_timer = None self.root.bind("<Right>", self.next_image) self.root.bind("<Left>", self.previous_image) self.root.bind("<space>", self.start_pause_slideshow) self.root.bind("<Configure>", self.update_image_size) def toggle_colors(self): if self.random_next_image_var.get(): self.random_next_image_checkbox.configure(fg="#2c2c2c", bg="#d8d8d8") else: self.random_next_image_checkbox.configure(**self.button_style) def select_folder(self): self.image_folder = filedialog.askdirectory() if self.image_folder: image_files = os.listdir(self.image_folder) # Get all files in the selected folder self.image_files = [file for file in image_files if file.endswith(self.SUPPORTED_EXTENSIONS)] # Filter image files if len(self.image_files) > 0: self.current_image_index = 0 self.canvas.pack(fill=tk.BOTH, expand=True) self.select_folder_button.pack(side=tk.LEFT, padx=5, pady=5) self.root.title("Jesturing in " + self.image_folder) self.canvas.config(bg="#808080", highlightthickness=0) self.display_image() self.start_button.config(**self.button_style) else: messagebox.showinfo("No Image Files", "The selected folder does not contain any image files.") self.image_folder = "" 
self.root.title("Jestur") def set_timer_interval(self): self.root.attributes("-topmost", False) interval = simpledialog.askinteger("Set Timer Interval", "How many seconds?") if interval: if interval < 1: # Check if interval is less than 1 second interval = 1 self.timer_interval = interval * 1000 # Convert to milliseconds self.set_timer_interval = self.timer_interval # Store the set timer interval self.root.lift() # Bring the main window to the top self.root.focus_force() # Give focus to the main window self.root.attributes("-topmost", True) def start_pause_slideshow(self, event=None): if self.image_folder == "": messagebox.showinfo("You haven't picked a folder", "Please select an image folder first.") else: if self.timer is None: # Check if timer is not running self.display_image() self.start_button.config(text="Pause") self.is_paused = False # Set paused flag to False self.start_timer() else: if self.is_paused: # Check if the slideshow is paused self.start_button.config(text="Pause") self.is_paused = False # Set paused flag to False self.start_timer() else: self.pause_timer() self.start_button.config(text="Start") self.is_paused = True # Set paused flag to True def pause_timer(self): if self.timer is not None: self.root.after_cancel(self.timer) self.timer = None def start_timer(self): if self.image_folder != "" and not self.is_paused: self.update_timer() self.timer = self.root.after(1000, self.start_timer) def update_timer(self): seconds_left = int(self.timer_interval / 1000) self.timer_label.config(text=f"{seconds_left}") self.timer_interval -= 1000 if self.timer_interval < 0: self.timer_interval = self.set_timer_interval # Use the stored set timer interval self.next_image() def toggle_mirror_image(self): self.is_mirrored = not self.is_mirrored self.display_image() def toggle_greyscale(self): self.is_greyscale = not self.is_greyscale self.display_image() def load_image(self, image_path): image = Image.open(image_path) # Check if the image has EXIF data if "exif" 
in image.info: exif_data = piexif.load(image.info["exif"]) if piexif.ImageIFD.Orientation in exif_data["0th"]: orientation = exif_data["0th"][piexif.ImageIFD.Orientation] if orientation == 3: image = image.rotate(180, expand=True) elif orientation == 6: image = image.rotate(-90, expand=True) elif orientation == 8: image = image.rotate(90, expand=True) if self.is_greyscale: image = image.convert("L") if self.is_mirrored: image = image.transpose(Image.FLIP_LEFT_RIGHT) aspect_ratio = image.width / image.height canvas_width = self.canvas.winfo_width() canvas_height = self.canvas.winfo_height() max_width = min(canvas_width, int(aspect_ratio * canvas_height)) max_height = min(canvas_height, int(canvas_width / aspect_ratio)) scale_factor = min(max_width / image.width, max_height / image.height) new_width = int(image.width * scale_factor) new_height = int(image.height * scale_factor) if new_width > 0 and new_height > 0: resized_image = image.resize((new_width, new_height), Image.BICUBIC) self.photo = ImageTk.PhotoImage(resized_image) self.canvas.delete("all") self.canvas.create_image(canvas_width // 2, canvas_height // 2, image=self.photo) def display_image(self): if self.image_folder != "" and len(self.image_files) > 0: if self.update_switch_timestamps(): # Show the text of the image name instead of loading the actual image image_name = self.image_files[self.current_image_index] self.canvas.delete("all") # Clear the canvas self.canvas.create_text( self.canvas.winfo_width() // 2, self.canvas.winfo_height() // 2, text=image_name, fill="white" ) else: # Process the queue and load the image normally if not quick-switching image_path = os.path.join(self.image_folder, self.image_files[self.current_image_index]) threading.Thread(target=self.load_image, args=(image_path,)).start() def update_image_size(self, event=None): current_width = self.root.winfo_width() current_height = self.root.winfo_height() if (current_width, current_height) != self.window_size: self.window_size = 
(current_width, current_height) self.canvas.config(width=current_width, height=current_height) self.buffer_forsize() def buffer_forsize(self, event=None): if self.resize_timer: self.root.after_cancel(self.resize_timer) self.resize_timer = self.root.after(500, self.display_image) def update_switch_timestamps(self): current_time = time.time() self.switch_timestamps.append(current_time) self.switch_timestamps = [t for t in self.switch_timestamps if current_time - t <= 1] if len(self.switch_timestamps) > 10: return True # Too many updates in a short time period return False def queue_image_loading(self): if self.image_loading_queue.full(): # Clear the queue if it’s full with self.image_loading_queue.mutex: self.image_loading_queue.queue.clear() self.image_loading_queue.put(self.current_image_index) self.process_image_loading_queue() def process_image_loading_queue(self): if not self.image_loading_queue.empty(): # Only process the last item in the queue while self.image_loading_queue.qsize() > 1: self.image_loading_queue.get() last_index = self.image_loading_queue.get() self.root.after_cancel(self.timer) if self.timer else None self.current_image_index = last_index threading.Thread(target=self.display_image).start() self.timer_interval = self.set_timer_interval def next_image(self, event=None): if self.image_folder != "" and not self.update_switch_timestamps(): # Update index as before if self.random_next_image_var.get(): # Check if random next image is selected self.current_image_index = random.randint(0, len(self.image_files) - 1) # Randomly select the next image index else: self.current_image_index = (self.current_image_index + 1) % len(self.image_files) # Linearly select the next image index threading.Thread(target=self.queue_image_loading()).start() # Load and display the image in a separate thread self.timer_interval = self.set_timer_interval # Use the stored set timer interval # Call the new queuing function instead of direct load def previous_image(self, 
event=None): if self.image_folder != "" and not self.update_switch_timestamps(): self.current_image_index = (self.current_image_index - 1) % len(self.image_files) self.queue_image_loading() self.timer_interval = self.set_timer_interval # Use the stored set timer interval when the image name shows up because i browse too fast, and when i stop pressing fast, and the current (last) image is supposed to be loaded and shown and not put in a queue, how do i make it so that instead of still showing the image name text, it refreshes the image in 500ms to show the current image instead of waiting for me to change image
77446d6364d553d656ca9035969cc50a
{ "intermediate": 0.24793119728565216, "beginner": 0.5077382326126099, "expert": 0.24433057010173798 }
37,166
In revit I want to start http listner and when there is get request with element id, want to move wall with this code, please write complete code to implement it. here is my move wall code [Transaction(TransactionMode.Manual)] [Regeneration(RegenerationOption.Manual)] public class WallMover : IExternalCommand { public Result Execute( ExternalCommandData commandData, ref string message, ElementSet elements) { UIApplication uiApp = commandData.Application; Document doc = uiApp.ActiveUIDocument.Document; // In a real application, you would get the wall by selection or some other method. // For the example purpose, we are fetching the first wall we find. //ElementId wallId = 1597039; // Replace with your known ElementId //Wall wall = doc.GetElement(wallId) as Wall; int elementIdToFind = 1597039; ElementId elementId = new ElementId(Convert.ToInt64(elementIdToFind)); Wall wall = doc.GetElement(elementId) as Wall; //Wall wall = new FilteredElementCollector(doc).OfCategory(BuiltInCategory.OST_Walls).WhereElementIsNotElementType().Cast<Wall>().FirstOrDefault(); if (wall == null) { message = “No wall found.”; return Result.Failed; } bool result = MoveUsingLocationCurve(doc, wall); if (result) { return Result.Succeeded; } else { message = “Wall could not be moved.”; return Result.Failed; } } private bool MoveUsingLocationCurve(Document doc, Wall wall) { // Check if the wall has a valid location curve LocationCurve wallLocationCurve = wall.Location as LocationCurve; if (wallLocationCurve == null) { return false; } XYZ translationVec = new XYZ(10, 20, 0); // Start a new transaction for modifying the document using (Transaction trans = new Transaction(doc, “Move Wall”)) { trans.Start(); try { // Perform the move operation wallLocationCurve.Move(translationVec); trans.Commit(); return true; } catch { // Abort the transaction in case of any errors trans.RollBack(); return false; } } } } write complete code including avove
ae40a454ce42d98d3828c5f13c760330
{ "intermediate": 0.5626368522644043, "beginner": 0.3022714853286743, "expert": 0.13509170711040497 }
37,167
{% block styles %} <link rel="stylesheet" href="{{ url_for('static', filename='admin_evenements.css') }}"> {% endblock %} {% block content %} <a id="retour" href="{{ url_for('menu_admin') }}" class="btn-retour">Retour</a> <h1>Les événements du festival</h1> <table> <thead> <tr> <th>id Evenement</th> <th>id Groupe</th> <th> id Lieu</th> <th>nom Evenement</th> <th>heure de début</th> <th>heure de fin</th> <th>Date de début</th> <th>Date de fin</th> <th>Actions</th> </tr> </thead> <tbody> {% for evenement in liste_evenements %} <tr> <td>{{ evenement.get_idE() }}</td> <td>{{ evenement.get_idG() or "Non attribué" }}</td> <td>{{ evenement.get_idL() or "Non attribué" }}</td> <td>{{ evenement.get_nomE() }}</td> <td>{{ evenement.get_heureDebutE() }}</td> <td>{{ evenement.get_heureFinE() }}</td> <td>{{ evenement.get_dateDebutE() }}</td> <td>{{ evenement.get_dateFinE() }}</td> <td> <button class="btn-modifier" data-id="{{ evenement.get_idE() }}" data-nom = "{{ evenement.get_nomE() }}" data-heureDebut = "{{ evenement.get_heureDebutE() }}" data-heureFin = "{{ evenement.get_heureFinE() }}" data-dateDebut = "{{ evenement.get_dateDebutE() }}" data-dateFin = "{{ evenement.get_dateFinE() }}">Modifier</button> <button class="btn-supprimer" data-id="{{ evenement.get_idE() }}">Supprimer</button> </td> </tr> {% endfor %} </tbody> </table> <!-- Modale pour modifier un évènement --> <div id="modal-modifier" class="modal"> <div class="modal-content"> <span class="close-button">x</span> <form action="/modifier_evenement" method="post"> <!-- ID Evenement (caché) --> <input type="hidden" name="id_evenement" id="id_evenement_modifier" value=""> <!-- Nom de l'événement --> <label for="nom_evenement_modifier">Nom de l'événement:</label> <input type="text" name="nom_evenement" id="nom_evenement_modifier" placeholder="Nom de l'événement" required> <!-- Horaire de début --> <label for="heure_debut_modifier">Heure de début:</label> <input type="time" name="heure_debut" id="heure_debut_modifier" 
required> <!-- Horaire de fin --> <label for="heure_fin_modifier">Heure de fin:</label> <input type="time" name="heure_fin" id="heure_fin_modifier" required> <!-- Date de début --> <label for="date_debut_modifier">Date de début:</label> <input type="date" name="date_debut" id="date_debut_modifier" max="2023-07-23" min="2023-07-21" required> <!-- Date de fin --> <label for="date_fin_modifier">Date de fin:</label> <input type="date" name="date_fin" id="date_fin_modifier" max="2023-07-23" min="2023-07-21" required> <div class="radios"> <label for="type_evenement_concert_modifier">Concert</label> <input type="radio" name="type_evenement" value="concert" id="type_evenement_concert_modifier" required> <label for="type_evenement_activite_modifier">Activité Annexe</label> <input type="radio" name="type_evenement" value="activite" id="type_evenement_activite_modifier" required> </div> <div id="concert_fields_modifier" style="display:none;"> <label for="temps_montage">Temps Montage:</label> <input type="time" name="temps_montage" id="temps_montage_modifier" required> <label for="temps_demontage">Temps Démontage:</label> <input type="time" name="temps_demontage" id="temps_demontage_modifier" required> </div> <div id="activite_fields_modifier" style="display:none;"> <label for="type_activite">Type d'activité:</label> <input type="text" name="type_activite" id="type_activite_modifier" required> <label for="ouvert_public">Ouvert au public:</label> <input type="checkbox" name="ouvert_public" id="ouvert_public" required> </div> <button id="modifier" type="submit">Modifier</button> </form> </div> </div> <!-- Modale pour supprimer un évènement --> <div id="modal-supprimer" class="modal"> <div class="modal-content"> <span class="close-button">x</span> <form action="/supprimer_evenement" method="post"> <!-- ID Evenement (caché) --> <input type="hidden" name="id_evenement" id="id_evenement_supprimer" value=""> <p>Êtes-vous sûr de vouloir supprimer cet évènement ?</p> <button 
id="supprimer" type="submit">Supprimer</button> </form> </div> </div> <!-- Modale pour ajouter un groupe --> <div id="modal-ajouter" class="modal"> <div class="modal-content"> <span class="close-button">x</span> <form action="/ajouter_evenement" method="post"> <!-- Nom de l'événement --> <label for="nom_evenement_ajouter">Nom de l'événement:</label> <input type="text" name="nom_evenement" id="nom_evenement_ajouter" placeholder="Nom de l'événement" required> <!-- Horaire de début --> <label for="heure_debut_ajouter">Heure de début:</label> <input type="time" name="heure_debut" id="heure_debut_ajouter" required> <!-- Horaire de fin --> <label for="heure_fin_ajouter">Heure de fin:</label> <input type="time" name="heure_fin" id="heure_fin_ajouter" required> <!-- Date de début --> <label for="date_debut_ajouter">Date de début:</label> <input type="date" name="date_debut" id="date_debut_ajouter" max="2023-07-23" min="2023-07-21" required> <!-- Date de fin --> <label for="date_fin_ajouter">Date de fin:</label> <input type="date" name="date_fin" id="date_fin_ajouter" max="2023-07-23" min="2023-07-21" required> <div class="radios"> <label for="type_evenement_concert_ajouter">Concert</label> <input type="radio" name="type_evenement" value="concert" id="type_evenement_concert_ajouter" required> <label for="type_evenement_activite_ajouter">Activité Annexe</label> <input type="radio" name="type_evenement" value="activite" id="type_evenement_activite_ajouter" required> </div> <div id="concert_fields" style="display:none;"> <label for="temps_montage">Temps Montage:</label> <input type="time" name="temps_montage" id="temps_montage"> <label for="temps_demontage">Temps Démontage:</label> <input type="time" name="temps_demontage" id="temps_demontage"> </div> <div id="activite_fields" style="display:none;"> <label for="type_activite">Type d'activité:</label> <input type="text" name="type_activite" id="type_activite"> <label for="ouvert_public">Ouvert au public:</label> <input 
type="checkbox" name="ouvert_public" id="ouvert_public"> </div> <button class="btn-ajouter" type="submit">Ajouter</button> </form> </div> </div> <button id="ajouter">Ajouter</button> <script> document.addEventListener("DOMContentLoaded", function() { var modalModifier = document.getElementById("modal-modifier"); var modalSupprimer = document.getElementById("modal-supprimer"); var modalAjouter = document.getElementById("modal-ajouter"); var btnClose = document.querySelectorAll(".close-button"); btnClose.forEach(function(btn) { btn.onclick = function() { btn.closest(".modal").style.display = "none"; }; }); document.querySelectorAll(".btn-modifier").forEach(function(btn) { btn.onclick = function() { document.getElementById("id_evenement_modifier").value = btn.getAttribute("data-id"); document.getElementById("nom_evenement_modifier").value = btn.getAttribute("data-nom"); document.getElementById("heure_debut_modifier").value = btn.getAttribute("data-heureDebut"); document.getElementById("heure_fin_modifier").value = btn.getAttribute("data-heureFin"); document.getElementById("date_debut_modifier").value = btn.getAttribute("data-dateDebut"); document.getElementById("date_fin_modifier").value = btn.getAttribute("data-dateFin"); modalModifier.style.display = "block"; }; }); document.querySelectorAll(".btn-supprimer").forEach(function(btn) { btn.onclick = function() { document.getElementById("id_evenement_supprimer").value = btn.getAttribute("data-id"); modalSupprimer.style.display = "block"; }; }); document.getElementById("ajouter").onclick = function() { modalAjouter.style.display = "block"; }; window.onclick = function(event) { if (event.target.classList.contains("modal")) { event.target.style.display = "none"; } } function toggleEventDetails(eventType) { var concertFields = document.getElementById('concert_fields'); var activiteFields = document.getElementById('activite_fields'); concertFields.style.display = 'none'; activiteFields.style.display = 'none'; if(eventType 
=== 'concert') { concertFields.style.display = 'block'; } else if (eventType === 'activite') { activiteFields.style.display = 'block'; } } document.getElementById('type_evenement_concert_ajouter').addEventListener('change', function() { if(this.checked) toggleEventDetails('concert'); }); document.getElementById('type_evenement_activite_ajouter').addEventListener('change', function() { if(this.checked) toggleEventDetails('activite'); }); function toggleEventDetailsModify(eventType) { var concertFields = document.getElementById("concert_fields_modifier"); var activiteFields = document.getElementById("activite_fields_modifier"); concertFields.style.display = "none"; activiteFields.style.display = "none"; if (eventType === "concert") { concertFields.style.display = "block"; } else if (eventType === "activite") { activiteFields.style.display = "block"; } } document.getElementById("type_evenement_concert_modifier").addEventListener("change", function() { if (this.checked) toggleEventDetails("concert"); }); document.getElementById('type_evenement_activite_modifier').addEventListener('change', function() { if(this.checked) toggleEventDetails('activite'); }); }); </script> {% endblock %} comment différencier le fait qu'un évènement soit un concert ou une activité annexe dans le modal modifier ? J'aimerais que ça affiche déjà les informations et les fields pour ce modal, et si c'est un concert on retire la radio activite annexe et inversement
01f0f5497bb29e4a2fafd2bd07f8ec4f
{ "intermediate": 0.4351615309715271, "beginner": 0.4395754337310791, "expert": 0.1252630352973938 }
37,168
class ImageViewer: def __init__(self, root): self.SUPPORTED_EXTENSIONS = (".png", ".jpg", ".jpeg", ".gif", ".bmp", ".JPG", ".JPEG", ".BMP", ".GIF", ".webp", ".WEBP") self.button_style = {"font": ("consolas", 10), "fg": "white", "bg": "#3c3c3c", "relief": "flat"} self.image_loading_queue = Queue(maxsize=3) self.switch_timestamps = [] self.root = root self.root.geometry("800x600") # Initial window size self.root.title("Jestur Viewer") self.root.configure(bg="#808080") # Set window background color self.root.attributes("-topmost", True) # Make the window stay on top self.image_folder = "" self.image_files = [] self.current_image_index = 0 self.main_frame = tk.Frame(self.root, bg="#808080") self.main_frame.pack() self.select_folder_button = tk.Button(self.main_frame, text="Select Folder", command=self.select_folder) self.select_folder_button.configure(**self.button_style) self.select_folder_button.pack(side=tk.LEFT, padx=5, pady=10) self.set_timer_button = tk.Button(self.main_frame, text="Timer", command=self.set_timer_interval) self.set_timer_button.configure(**self.button_style) self.set_timer_button.pack(side=tk.LEFT, padx=5, pady=10) self.start_button = tk.Button(self.main_frame, text="Start", command=self.start_pause_slideshow) self.start_button.configure(font=self.button_style["font"], bg="#909090", fg="#707070", relief=tk.FLAT) self.start_button.pack(side=tk.LEFT, padx=5, pady=10) self.mirror_button = tk.Button(self.main_frame, text=">|<", command=self.toggle_mirror_image) self.mirror_button.configure(**self.button_style) self.mirror_button.pack(side=tk.LEFT, padx=5, pady=10) self.greyscale_button = tk.Button(self.main_frame, text="B/W", command=self.toggle_greyscale) self.greyscale_button.configure(**self.button_style) self.greyscale_button.pack(side=tk.LEFT, padx=5, pady=10) self.random_next_image_var = tk.BooleanVar(value=False) # Checkbox variable self.random_next_image_checkbox = tk.Checkbutton(self.main_frame, text="Random", 
variable=self.random_next_image_var, command=self.toggle_colors) self.random_next_image_checkbox.configure(**self.button_style, bd=2) self.random_next_image_checkbox.pack(side=tk.LEFT, padx=5, pady=10) #image label self.canvas = tk.Canvas(self.root) self.is_mirrored = False self.is_greyscale = False self.is_paused = False self.timer_label = tk.Label(self.root, text="5", **self.button_style, anchor="ne") #fg="white", bg="black", font=("arial", 12), bd=1, relief=tk.RIDGE) self.timer_label.configure(font=("consolas",17)) self.timer_label.place(relx=1, anchor="ne", x=-0, y=0) self.timer_interval = 5000 # Default time interval in milliseconds (5 seconds) self.set_timer_interval = 5000 # Store the set timer interval self.timer = None # Timer ID self.window_size = (self.root.winfo_width(), self.root.winfo_height()) self.resize_timer = None self.root.bind("<Right>", self.next_image) self.root.bind("<Left>", self.previous_image) self.root.bind("<space>", self.start_pause_slideshow) self.root.bind("<Configure>", self.update_image_size) def toggle_colors(self): if self.random_next_image_var.get(): self.random_next_image_checkbox.configure(fg="#2c2c2c", bg="#d8d8d8") else: self.random_next_image_checkbox.configure(**self.button_style) def select_folder(self): self.image_folder = filedialog.askdirectory() if self.image_folder: image_files = os.listdir(self.image_folder) # Get all files in the selected folder self.image_files = [file for file in image_files if file.endswith(self.SUPPORTED_EXTENSIONS)] # Filter image files if len(self.image_files) > 0: self.current_image_index = 0 self.canvas.pack(fill=tk.BOTH, expand=True) self.select_folder_button.pack(side=tk.LEFT, padx=5, pady=5) self.root.title("Jesturing in " + self.image_folder) self.canvas.config(bg="#808080", highlightthickness=0) self.display_image() self.start_button.config(**self.button_style) else: messagebox.showinfo("No Image Files", "The selected folder does not contain any image files.") self.image_folder = "" 
self.root.title("Jestur") def set_timer_interval(self): self.root.attributes("-topmost", False) interval = simpledialog.askinteger("Set Timer Interval", "How many seconds?") if interval: if interval < 1: # Check if interval is less than 1 second interval = 1 self.timer_interval = interval * 1000 # Convert to milliseconds self.set_timer_interval = self.timer_interval # Store the set timer interval self.root.lift() # Bring the main window to the top self.root.focus_force() # Give focus to the main window self.root.attributes("-topmost", True) def start_pause_slideshow(self, event=None): if self.image_folder == "": messagebox.showinfo("You haven't picked a folder", "Please select an image folder first.") else: if self.timer is None: # Check if timer is not running self.display_image() self.start_button.config(text="Pause") self.is_paused = False # Set paused flag to False self.start_timer() else: if self.is_paused: # Check if the slideshow is paused self.start_button.config(text="Pause") self.is_paused = False # Set paused flag to False self.start_timer() else: self.pause_timer() self.start_button.config(text="Start") self.is_paused = True # Set paused flag to True def pause_timer(self): if self.timer is not None: self.root.after_cancel(self.timer) self.timer = None def start_timer(self): if self.image_folder != "" and not self.is_paused: self.update_timer() self.timer = self.root.after(1000, self.start_timer) def update_timer(self): seconds_left = int(self.timer_interval / 1000) self.timer_label.config(text=f"{seconds_left}") self.timer_interval -= 1000 if self.timer_interval < 0: self.timer_interval = self.set_timer_interval # Use the stored set timer interval self.next_image() def toggle_mirror_image(self): self.is_mirrored = not self.is_mirrored self.display_image() def toggle_greyscale(self): self.is_greyscale = not self.is_greyscale self.display_image() def load_image(self, image_path): image = Image.open(image_path) # Check if the image has EXIF data if "exif" 
in image.info: exif_data = piexif.load(image.info["exif"]) if piexif.ImageIFD.Orientation in exif_data["0th"]: orientation = exif_data["0th"][piexif.ImageIFD.Orientation] if orientation == 3: image = image.rotate(180, expand=True) elif orientation == 6: image = image.rotate(-90, expand=True) elif orientation == 8: image = image.rotate(90, expand=True) if self.is_greyscale: image = image.convert("L") if self.is_mirrored: image = image.transpose(Image.FLIP_LEFT_RIGHT) aspect_ratio = image.width / image.height canvas_width = self.canvas.winfo_width() canvas_height = self.canvas.winfo_height() max_width = min(canvas_width, int(aspect_ratio * canvas_height)) max_height = min(canvas_height, int(canvas_width / aspect_ratio)) scale_factor = min(max_width / image.width, max_height / image.height) new_width = int(image.width * scale_factor) new_height = int(image.height * scale_factor) if new_width > 0 and new_height > 0: resized_image = image.resize((new_width, new_height), Image.BICUBIC) self.photo = ImageTk.PhotoImage(resized_image) self.canvas.delete("all") self.canvas.create_image(canvas_width // 2, canvas_height // 2, image=self.photo) def display_image(self): if self.image_folder != "" and len(self.image_files) > 0: if self.update_switch_timestamps(): # Show the text of the image name instead of loading the actual image image_name = self.image_files[self.current_image_index] self.canvas.delete("all") # Clear the canvas self.canvas.create_text( self.canvas.winfo_width() // 2, self.canvas.winfo_height() // 2, text=image_name, fill="white" ) else: # Process the queue and load the image normally if not quick-switching image_path = os.path.join(self.image_folder, self.image_files[self.current_image_index]) threading.Thread(target=self.load_image, args=(image_path,)).start() def update_image_size(self, event=None): current_width = self.root.winfo_width() current_height = self.root.winfo_height() if (current_width, current_height) != self.window_size: self.window_size = 
(current_width, current_height) self.canvas.config(width=current_width, height=current_height) self.buffer_forsize() def buffer_forsize(self, event=None): if self.resize_timer: self.root.after_cancel(self.resize_timer) self.resize_timer = self.root.after(500, self.display_image) def update_switch_timestamps(self): current_time = time.time() self.switch_timestamps.append(current_time) self.switch_timestamps = [t for t in self.switch_timestamps if current_time - t <= 2] if len(self.switch_timestamps) > 10: # Schedule the delayed display of the current image self.display_current_image_with_delay() return True # We"re indicating "too many updates" return False # Continue as normal if under the threshold def queue_image_loading(self): if self.image_loading_queue.full(): # Clear the queue if it"s full with self.image_loading_queue.mutex: self.image_loading_queue.queue.clear() self.image_loading_queue.put(self.current_image_index) self.process_image_loading_queue() def process_image_loading_queue(self): if not self.image_loading_queue.empty(): # Only process the last item in the queue while self.image_loading_queue.qsize() > 1: self.image_loading_queue.get() last_index = self.image_loading_queue.get() self.root.after_cancel(self.timer) if self.timer else None self.current_image_index = last_index threading.Thread(target=self.display_image).start() self.timer_interval = self.set_timer_interval def next_image(self, event=None): if self.image_folder != "" and not self.update_switch_timestamps(): # Update index as before if self.random_next_image_var.get(): # Check if random next image is selected self.current_image_index = random.randint(0, len(self.image_files) - 1) # Randomly select the next image index else: self.current_image_index = (self.current_image_index + 1) % len(self.image_files) # Linearly select the next image index self.display_current_image_with_delay() # Schedule image display with delay self.timer_interval = self.set_timer_interval # Use the stored set 
timer interval # Call the new queuing function instead of direct load def previous_image(self, event=None): if self.image_folder != "" and not self.update_switch_timestamps(): self.current_image_index = (self.current_image_index - 1) % len(self.image_files) self.display_current_image_with_delay() # Schedule image display with delay self.timer_interval = self.set_timer_interval # Reset the timer interval def display_current_image_with_delay(self): # Cancel any previous image display attempts if hasattr(self, "delayed_display_image_timer"): self.root.after_cancel(self.delayed_display_image_timer) # Schedule a new display attempt for the current image after the delay self.delayed_display_image_timer = self.root.after(200, self.display_current_image) def display_current_image(self): # Call the original method to load and display the current image self.display_image() i want to make this feel more snappy. what would make the image browsing for next and previous in rapid, and hold press show more snappy. i'm thinking of setting certain queue of rapid keypress value. or how to make it so that when it's only the image name that is shown, i can browse through the image name text without waiting for queue or delay. then when it's time to load the last image, it uses the delayed loading of image already in the code above.
527becf871f1bb478788706c3098139f
{ "intermediate": 0.24868446588516235, "beginner": 0.5101708173751831, "expert": 0.24114468693733215 }
37,169
Unresolved reference 'responses'. How would I concatenate response.get_output() knowing that data type is class enmscripting.common.element.ElementGroup. I do not want to change data type
92c062f327ff3d0b4a0e97a2acdbdcf5
{ "intermediate": 0.4852372109889984, "beginner": 0.29908156394958496, "expert": 0.21568119525909424 }
37,170
class ImageViewer: def __init__(self, root): self.SUPPORTED_EXTENSIONS = (".png", ".jpg", ".jpeg", ".gif", ".bmp", ".JPG", ".JPEG", ".BMP", ".GIF", ".webp", ".WEBP") self.button_style = {"font": ("consolas", 10), "fg": "white", "bg": "#3c3c3c", "relief": "flat"} self.image_loading_queue = Queue(maxsize=3) self.switch_timestamps = [] self.root = root self.root.geometry("800x600") # Initial window size self.root.title("Jestur Viewer") self.root.configure(bg="#808080") # Set window background color self.root.attributes("-topmost", True) # Make the window stay on top self.image_folder = "" self.image_files = [] self.current_image_index = 0 self.main_frame = tk.Frame(self.root, bg="#808080") self.main_frame.pack() self.select_folder_button = tk.Button(self.main_frame, text="Select Folder", command=self.select_folder) self.select_folder_button.configure(**self.button_style) self.select_folder_button.pack(side=tk.LEFT, padx=5, pady=10) self.set_timer_button = tk.Button(self.main_frame, text="Timer", command=self.set_timer_interval) self.set_timer_button.configure(**self.button_style) self.set_timer_button.pack(side=tk.LEFT, padx=5, pady=10) self.start_button = tk.Button(self.main_frame, text="Start", command=self.start_pause_slideshow) self.start_button.configure(font=self.button_style["font"], bg="#909090", fg="#707070", relief=tk.FLAT) self.start_button.pack(side=tk.LEFT, padx=5, pady=10) self.mirror_button = tk.Button(self.main_frame, text=">|<", command=self.toggle_mirror_image) self.mirror_button.configure(**self.button_style) self.mirror_button.pack(side=tk.LEFT, padx=5, pady=10) self.greyscale_button = tk.Button(self.main_frame, text="B/W", command=self.toggle_greyscale) self.greyscale_button.configure(**self.button_style) self.greyscale_button.pack(side=tk.LEFT, padx=5, pady=10) self.random_next_image_var = tk.BooleanVar(value=False) # Checkbox variable self.random_next_image_checkbox = tk.Checkbutton(self.main_frame, text="Random", 
variable=self.random_next_image_var, command=self.toggle_colors) self.random_next_image_checkbox.configure(**self.button_style, bd=2) self.random_next_image_checkbox.pack(side=tk.LEFT, padx=5, pady=10) #image label self.canvas = tk.Canvas(self.root) self.is_mirrored = False self.is_greyscale = False self.is_paused = False self.timer_label = tk.Label(self.root, text="5", **self.button_style, anchor="ne") #fg="white", bg="black", font=("arial", 12), bd=1, relief=tk.RIDGE) self.timer_label.configure(font=("consolas",17)) self.timer_label.place(relx=1, anchor="ne", x=-0, y=0) self.timer_interval = 5000 # Default time interval in milliseconds (5 seconds) self.set_timer_interval = 5000 # Store the set timer interval self.timer = None # Timer ID self.window_size = (self.root.winfo_width(), self.root.winfo_height()) self.resize_timer = None self.root.bind("<Right>", self.next_image) self.root.bind("<Left>", self.previous_image) self.root.bind("<space>", self.start_pause_slideshow) self.root.bind("<Configure>", self.update_image_size) def toggle_colors(self): if self.random_next_image_var.get(): self.random_next_image_checkbox.configure(fg="#2c2c2c", bg="#d8d8d8") else: self.random_next_image_checkbox.configure(**self.button_style) def select_folder(self): self.image_folder = filedialog.askdirectory() if self.image_folder: image_files = os.listdir(self.image_folder) # Get all files in the selected folder self.image_files = [file for file in image_files if file.endswith(self.SUPPORTED_EXTENSIONS)] # Filter image files if len(self.image_files) > 0: self.current_image_index = 0 self.canvas.pack(fill=tk.BOTH, expand=True) self.select_folder_button.pack(side=tk.LEFT, padx=5, pady=5) self.root.title("Jesturing in " + self.image_folder) self.canvas.config(bg="#808080", highlightthickness=0) self.display_image() self.start_button.config(**self.button_style) else: messagebox.showinfo("No Image Files", "The selected folder does not contain any image files.") self.image_folder = "" 
self.root.title("Jestur") def set_timer_interval(self): self.root.attributes("-topmost", False) interval = simpledialog.askinteger("Set Timer Interval", "How many seconds?") if interval: if interval < 1: # Check if interval is less than 1 second interval = 1 self.timer_interval = interval * 1000 # Convert to milliseconds self.set_timer_interval = self.timer_interval # Store the set timer interval self.root.lift() # Bring the main window to the top self.root.focus_force() # Give focus to the main window self.root.attributes("-topmost", True) def start_pause_slideshow(self, event=None): if self.image_folder == "": messagebox.showinfo("You haven't picked a folder", "Please select an image folder first.") else: if self.timer is None: # Check if timer is not running self.display_image() self.start_button.config(text="Pause") self.is_paused = False # Set paused flag to False self.start_timer() else: if self.is_paused: # Check if the slideshow is paused self.start_button.config(text="Pause") self.is_paused = False # Set paused flag to False self.start_timer() else: self.pause_timer() self.start_button.config(text="Start") self.is_paused = True # Set paused flag to True def pause_timer(self): if self.timer is not None: self.root.after_cancel(self.timer) self.timer = None def start_timer(self): if self.image_folder != "" and not self.is_paused: self.update_timer() self.timer = self.root.after(1000, self.start_timer) def update_timer(self): seconds_left = int(self.timer_interval / 1000) self.timer_label.config(text=f"{seconds_left}") self.timer_interval -= 1000 if self.timer_interval < 0: self.timer_interval = self.set_timer_interval # Use the stored set timer interval self.next_image() def toggle_mirror_image(self): self.is_mirrored = not self.is_mirrored self.display_image() def toggle_greyscale(self): self.is_greyscale = not self.is_greyscale self.display_image() def load_image(self, image_path): image = Image.open(image_path) # Check if the image has EXIF data if "exif" 
in image.info: exif_data = piexif.load(image.info["exif"]) if piexif.ImageIFD.Orientation in exif_data["0th"]: orientation = exif_data["0th"][piexif.ImageIFD.Orientation] if orientation == 3: image = image.rotate(180, expand=True) elif orientation == 6: image = image.rotate(-90, expand=True) elif orientation == 8: image = image.rotate(90, expand=True) if self.is_greyscale: image = image.convert("L") if self.is_mirrored: image = image.transpose(Image.FLIP_LEFT_RIGHT) aspect_ratio = image.width / image.height canvas_width = self.canvas.winfo_width() canvas_height = self.canvas.winfo_height() max_width = min(canvas_width, int(aspect_ratio * canvas_height)) max_height = min(canvas_height, int(canvas_width / aspect_ratio)) scale_factor = min(max_width / image.width, max_height / image.height) new_width = int(image.width * scale_factor) new_height = int(image.height * scale_factor) if new_width > 0 and new_height > 0: resized_image = image.resize((new_width, new_height), Image.BICUBIC) self.photo = ImageTk.PhotoImage(resized_image) self.canvas.delete("all") self.canvas.create_image(canvas_width // 2, canvas_height // 2, image=self.photo) def display_image(self): if self.image_folder != "" and len(self.image_files) > 0: if self.update_switch_timestamps(): # Show the text of the image name instead of loading the actual image image_name = self.image_files[self.current_image_index] self.canvas.delete("all") # Clear the canvas self.canvas.create_text( self.canvas.winfo_width() // 2, self.canvas.winfo_height() // 2, text=image_name, fill="white" ) else: # Process the queue and load the image normally if not quick-switching image_path = os.path.join(self.image_folder, self.image_files[self.current_image_index]) threading.Thread(target=self.load_image, args=(image_path,)).start() def update_image_size(self, event=None): current_width = self.root.winfo_width() current_height = self.root.winfo_height() if (current_width, current_height) != self.window_size: self.window_size = 
(current_width, current_height) self.canvas.config(width=current_width, height=current_height) self.buffer_forsize() def buffer_forsize(self, event=None): if self.resize_timer: self.root.after_cancel(self.resize_timer) self.resize_timer = self.root.after(500, self.display_image) def update_switch_timestamps(self): current_time = time.time() self.switch_timestamps.append(current_time) # Clean up old timestamps threshold_time = 1 self.switch_timestamps = [t for t in self.switch_timestamps if current_time - t <= threshold_time] rapid_browsing = len(self.switch_timestamps) > 6 # Adjust this threshold as needed if rapid_browsing: # Schedule the delayed display of the current image self.display_current_image_with_delay() self.switch_timestamps.clear() # Clear timestamps to reset rapid browsing detection return rapid_browsing def queue_image_loading(self): if self.image_loading_queue.full(): # Clear the queue if it"s full with self.image_loading_queue.mutex: self.image_loading_queue.queue.clear() self.image_loading_queue.put(self.current_image_index) self.process_image_loading_queue() def process_image_loading_queue(self): if not self.image_loading_queue.empty(): # Only process the last item in the queue while self.image_loading_queue.qsize() > 1: self.image_loading_queue.get() last_index = self.image_loading_queue.get() self.root.after_cancel(self.timer) if self.timer else None self.current_image_index = last_index threading.Thread(target=self.display_image).start() self.timer_interval = self.set_timer_interval def next_image(self, event=None): if self.image_folder != "" and not self.update_switch_timestamps(): self.current_image_index = (self.current_image_index + 1) % len(self.image_files) self.display_image() self.timer_interval = self.set_timer_interval # Reset the timer interval def previous_image(self, event=None): if self.image_folder != "" and not self.update_switch_timestamps(): self.current_image_index = (self.current_image_index - 1) % len(self.image_files) 
self.display_image() self.timer_interval = self.set_timer_interval # Reset the timer interval def display_current_image_with_delay(self): # Cancel any previous image display attempts if hasattr(self, "delayed_display_image_timer"): self.root.after_cancel(self.delayed_display_image_timer) # Schedule a new display attempt for the current image after the delay self.delayed_display_image_timer = self.root.after(300, self.display_current_image) def display_current_image(self): # Call the original method to load and display the current image self.display_image() it seems like rapid pressing triggers the “only show name” but long pressing still loads every image. i want it so that when i rapid press, and long press it shows only the name instead of loading the image until i let go, the it loads the latest current image. i want it to still process rapid and long presses but when it above a limit like queue or too much button presses it only show the image name, how can i improve that, can you tell me exactly what to change and where
1683b884996cbfde58eb7acf81622bf4
{ "intermediate": 0.24868446588516235, "beginner": 0.5101708173751831, "expert": 0.24114468693733215 }
37,171
class ImageViewer: def __init__(self, root): self.SUPPORTED_EXTENSIONS = (".png", ".jpg", ".jpeg", ".gif", ".bmp", ".JPG", ".JPEG", ".BMP", ".GIF", ".webp", ".WEBP") self.button_style = {"font": ("consolas", 10), "fg": "white", "bg": "#3c3c3c", "relief": "flat"} self.image_loading_queue = Queue(maxsize=3) self.switch_timestamps = [] self.is_button_press_in_progress = False # Variable to track if button is pressed self.root = root self.root.geometry("800x600") # Initial window size self.root.title("Jestur Viewer") self.root.configure(bg="#808080") # Set window background color self.root.attributes("-topmost", True) # Make the window stay on top self.image_folder = "" self.image_files = [] self.current_image_index = 0 self.main_frame = tk.Frame(self.root, bg="#808080") self.main_frame.pack() self.select_folder_button = tk.Button(self.main_frame, text="Select Folder", command=self.select_folder) self.select_folder_button.configure(**self.button_style) self.select_folder_button.pack(side=tk.LEFT, padx=5, pady=10) self.set_timer_button = tk.Button(self.main_frame, text="Timer", command=self.set_timer_interval) self.set_timer_button.configure(**self.button_style) self.set_timer_button.pack(side=tk.LEFT, padx=5, pady=10) self.start_button = tk.Button(self.main_frame, text="Start", command=self.start_pause_slideshow) self.start_button.configure(font=self.button_style["font"], bg="#909090", fg="#707070", relief=tk.FLAT) self.start_button.pack(side=tk.LEFT, padx=5, pady=10) self.mirror_button = tk.Button(self.main_frame, text=">|<", command=self.toggle_mirror_image) self.mirror_button.configure(**self.button_style) self.mirror_button.pack(side=tk.LEFT, padx=5, pady=10) self.greyscale_button = tk.Button(self.main_frame, text="B/W", command=self.toggle_greyscale) self.greyscale_button.configure(**self.button_style) self.greyscale_button.pack(side=tk.LEFT, padx=5, pady=10) self.random_next_image_var = tk.BooleanVar(value=False) # Checkbox variable 
self.random_next_image_checkbox = tk.Checkbutton(self.main_frame, text="Random", variable=self.random_next_image_var, command=self.toggle_colors) self.random_next_image_checkbox.configure(**self.button_style, bd=2) self.random_next_image_checkbox.pack(side=tk.LEFT, padx=5, pady=10) #image label self.canvas = tk.Canvas(self.root) self.is_mirrored = False self.is_greyscale = False self.is_paused = False self.timer_label = tk.Label(self.root, text="5", **self.button_style, anchor="ne") #fg="white", bg="black", font=("arial", 12), bd=1, relief=tk.RIDGE) self.timer_label.configure(font=("consolas",17)) self.timer_label.place(relx=1, anchor="ne", x=-0, y=0) self.timer_interval = 5000 # Default time interval in milliseconds (5 seconds) self.set_timer_interval = 5000 # Store the set timer interval self.timer = None # Timer ID self.window_size = (self.root.winfo_width(), self.root.winfo_height()) self.resize_timer = None self.root.bind("<Right>", self.next_image) self.root.bind("<Left>", self.previous_image) self.root.bind("<space>", self.start_pause_slideshow) self.root.bind("<Configure>", self.update_image_size) self.root.bind("<KeyRelease-Left>", self.release_button_press) self.root.bind("<KeyRelease-Right>", self.release_button_press) def manage_button_press(self): self.is_button_press_in_progress = True if hasattr(self, "delayed_display_image_timer"): self.root.after_cancel(self.delayed_display_image_timer) def release_button_press(self, event=None): self.is_button_press_in_progress = False # Schedule the delayed display of the last intended image self.delayed_display_image_timer = self.root.after(100, self.display_current_image) def toggle_colors(self): if self.random_next_image_var.get(): self.random_next_image_checkbox.configure(fg="#2c2c2c", bg="#d8d8d8") else: self.random_next_image_checkbox.configure(**self.button_style) def select_folder(self): self.image_folder = filedialog.askdirectory() if self.image_folder: image_files = os.listdir(self.image_folder) # Get 
all files in the selected folder self.image_files = [file for file in image_files if file.endswith(self.SUPPORTED_EXTENSIONS)] # Filter image files if len(self.image_files) > 0: self.current_image_index = 0 self.canvas.pack(fill=tk.BOTH, expand=True) self.select_folder_button.pack(side=tk.LEFT, padx=5, pady=5) self.root.title("Jesturing in " + self.image_folder) self.canvas.config(bg="#808080", highlightthickness=0) self.display_image() self.start_button.config(**self.button_style) else: messagebox.showinfo("No Image Files", "The selected folder does not contain any image files.") self.image_folder = "" self.root.title("Jestur") def set_timer_interval(self): self.root.attributes("-topmost", False) interval = simpledialog.askinteger("Set Timer Interval", "How many seconds?") if interval: if interval < 1: # Check if interval is less than 1 second interval = 1 self.timer_interval = interval * 1000 # Convert to milliseconds self.set_timer_interval = self.timer_interval # Store the set timer interval self.root.lift() # Bring the main window to the top self.root.focus_force() # Give focus to the main window self.root.attributes("-topmost", True) def start_pause_slideshow(self, event=None): if self.image_folder == "": messagebox.showinfo("You haven't picked a folder", "Please select an image folder first.") else: if self.timer is None: # Check if timer is not running self.display_image() self.start_button.config(text="Pause") self.is_paused = False # Set paused flag to False self.start_timer() else: if self.is_paused: # Check if the slideshow is paused self.start_button.config(text="Pause") self.is_paused = False # Set paused flag to False self.start_timer() else: self.pause_timer() self.start_button.config(text="Start") self.is_paused = True # Set paused flag to True def pause_timer(self): if self.timer is not None: self.root.after_cancel(self.timer) self.timer = None def start_timer(self): if self.image_folder != "" and not self.is_paused: self.update_timer() self.timer 
= self.root.after(1000, self.start_timer) def update_timer(self): seconds_left = int(self.timer_interval / 1000) self.timer_label.config(text=f"{seconds_left}") self.timer_interval -= 1000 if self.timer_interval < 0: self.timer_interval = self.set_timer_interval # Use the stored set timer interval # Do not call next_image(); instead reset index and display the image if it is the time for a slide change if not self.random_next_image_var.get(): self.current_image_index = (self.current_image_index + 1) % len(self.image_files) else: self.current_image_index = random.randint(0, len(self.image_files) - 1) self.display_image() def toggle_mirror_image(self): self.is_mirrored = not self.is_mirrored self.display_image() def toggle_greyscale(self): self.is_greyscale = not self.is_greyscale self.display_image() def load_image(self, image_path): image = Image.open(image_path) # Check if the image has EXIF data if "exif" in image.info: exif_data = piexif.load(image.info["exif"]) if piexif.ImageIFD.Orientation in exif_data["0th"]: orientation = exif_data["0th"][piexif.ImageIFD.Orientation] if orientation == 3: image = image.rotate(180, expand=True) elif orientation == 6: image = image.rotate(-90, expand=True) elif orientation == 8: image = image.rotate(90, expand=True) if self.is_greyscale: image = image.convert("L") if self.is_mirrored: image = image.transpose(Image.FLIP_LEFT_RIGHT) aspect_ratio = image.width / image.height canvas_width = self.canvas.winfo_width() canvas_height = self.canvas.winfo_height() max_width = min(canvas_width, int(aspect_ratio * canvas_height)) max_height = min(canvas_height, int(canvas_width / aspect_ratio)) scale_factor = min(max_width / image.width, max_height / image.height) new_width = int(image.width * scale_factor) new_height = int(image.height * scale_factor) if new_width > 0 and new_height > 0: resized_image = image.resize((new_width, new_height), Image.BICUBIC) self.photo = ImageTk.PhotoImage(resized_image) self.canvas.delete("all") 
self.canvas.create_image(canvas_width // 2, canvas_height // 2, image=self.photo) def display_image(self): if self.image_folder != "" and self.image_files: if self.should_load_image(): # We should immediately load the image image_path = os.path.join(self.image_folder, self.image_files[self.current_image_index]) threading.Thread(target=self.load_image, args=(image_path,)).start() else: # Handling rapid browsing or long press by showing only image name image_name = self.image_files[self.current_image_index] self.show_image_name(image_name) def show_image_name(self, name): self.canvas.delete("all") self.canvas.create_text( self.canvas.winfo_width() // 2, self.canvas.winfo_height() // 2, text=name, fill="white" ) def update_image_size(self, event=None): current_width = self.root.winfo_width() current_height = self.root.winfo_height() if (current_width, current_height) != self.window_size: self.window_size = (current_width, current_height) self.canvas.config(width=current_width, height=current_height) self.buffer_forsize() def buffer_forsize(self, event=None): if self.resize_timer: self.root.after_cancel(self.resize_timer) self.resize_timer = self.root.after(500, self.display_image) def update_switch_timestamps(self): current_time = time.time() self.switch_timestamps.append(current_time) # Clean up old timestamps threshold_time = 1 self.switch_timestamps = [t for t in self.switch_timestamps if current_time - t <= threshold_time] rapid_browsing = len(self.switch_timestamps) > 6 # Adjust this threshold as needed if rapid_browsing: # Schedule the delayed display of the current image self.display_current_image_with_delay() self.switch_timestamps.clear() # Clear timestamps to reset rapid browsing detection return rapid_browsing def queue_image_loading(self): if self.image_loading_queue.full(): # Clear the queue if it"s full with self.image_loading_queue.mutex: self.image_loading_queue.queue.clear() self.image_loading_queue.put(self.current_image_index) 
self.process_image_loading_queue() def process_image_loading_queue(self): if not self.image_loading_queue.empty(): # Only process the last item in the queue while self.image_loading_queue.qsize() > 1: self.image_loading_queue.get() last_index = self.image_loading_queue.get() self.root.after_cancel(self.timer) if self.timer else None self.current_image_index = last_index threading.Thread(target=self.display_image).start() self.timer_interval = self.set_timer_interval def next_image(self, event=None): self.manage_button_press() if self.image_folder != "" and self.image_files: if self.should_load_image(): if self.random_next_image_var.get(): # Skip random selection if navigating through images too rapidly or holding the button too long self.current_image_index = random.randint(0, len(self.image_files) - 1) else: self.current_image_index = (self.current_image_index + 1) % len(self.image_files) self.display_image() def previous_image(self, event=None): self.manage_button_press() # Indicate start of button press if self.image_folder != "" and self.image_files: # Ensure we have images to handle if self.should_load_image(): if self.random_next_image_var.get(): # Skip random selection if navigating through images too rapidly or holding the button too long self.current_image_index = random.randint(0, len(self.image_files) - 1) else: self.current_image_index = (self.current_image_index - 1) % len(self.image_files) self.display_image() def display_current_image_with_delay(self): # Cancel any previous image display attempts if hasattr(self, "delayed_display_image_timer"): self.root.after_cancel(self.delayed_display_image_timer) # Schedule a new display attempt for the current image after the delay self.delayed_display_image_timer = self.root.after(300, self.display_current_image) def display_current_image(self): # Call the original method to load and display the current image self.display_image() def should_load_image(self): threshold_rapid_presses = 6 # after how many rapid 
presses to display the image name threshold_time = 1 # how long to consider presses as "rapid" in seconds current_time = time.time() self.switch_timestamps.append(current_time) self.switch_timestamps = [t for t in self.switch_timestamps if current_time - t <= threshold_time] # Reset timer interval self.timer_interval = self.set_timer_interval # Check if rapid or long pressing is occurring if len(self.switch_timestamps) > threshold_rapid_presses or self.is_button_press_in_progress: # Schedule the delayed display of the current image self.display_current_image_with_delay() return False return True on the next and previous button presses. it just load image name and stays there instead of loading the next image name or show image. it's supposed to load and show images immediately on next and previous press, even when rapidly pressed or held, but only if it's too much, it loads just the image name of the current image it's supposed to be showing and browse that way until the key press is released, or when there's not too much in queue. so the behaviour that i'm looking for is it loads images immediately on next and previous button press but if it's too much on the queue or too rapid, it only show image name as i browse through snappy, then when i stop pressing at the current image, it refreshes and load the image as it's supposed to. can you just give me the complete fixed code with that behaviour with a more neat arrangement and proper comment on each method.
69a7e978eaffea4b520abf34133aa18d
{ "intermediate": 0.2645386755466461, "beginner": 0.5725326538085938, "expert": 0.16292870044708252 }
37,172
Hello, can you please fix this code
4bc1e9e0e51de7d001701d7124ee3a9f
{ "intermediate": 0.27551281452178955, "beginner": 0.42168477177619934, "expert": 0.3028023838996887 }
37,173
how to interact with revit from external python script
8aa269422e1b58597f3b31ca4ffa6c24
{ "intermediate": 0.5648854970932007, "beginner": 0.16275988519191742, "expert": 0.2723546028137207 }
37,174
correct this code on Haskell pls fac1 :: (Integral a) => a -> a fac1 n = product [1..n] -- Вариант 2: fac2 :: (Integral a) => a -> a fac2 0 = 1 fac2 n = n * fac2 (n - 1) main :: IO () main = do putStrLn "Введите число для вычисления факториала: " input <- getLine let xs = fac1 input let xy = fac2 input putStrLn $ "Вычисленный факториал 1 = " ++ show xs putStrLn $ "Вычисленный факториал 2 = " ++ show xy
b78dfe67700c40eb6d4677227b8fa6a4
{ "intermediate": 0.3621259331703186, "beginner": 0.4418830871582031, "expert": 0.19599094986915588 }
37,175
correct this code please fac1 :: (Integral a) => a -> a fac1 n = product [1..n] -- Вариант 2: fac2 :: (Integral a) => a -> a fac2 0 = 1 fac2 n = n * fac2 (n - 1) main :: IO () main = do putStrLn "Введите число для вычисления факториала: " input <- getLine let xs = fac1 input let xy = fac2 input putStrLn $ "Вычисленный факториал 1 = " ++ show xs putStrLn $ "Вычисленный факториал 2 = " ++ show xy
c358431896360285ce73a0177fba6ff0
{ "intermediate": 0.31203576922416687, "beginner": 0.44120150804519653, "expert": 0.2467627078294754 }
37,176
correct fac1 :: (Integral a) => a -> a fac1 n = product [1..n] -- Вариант 2: fac2 :: (Integral a) => a -> a fac2 0 = 1 fac2 n = n * fac2 (n - 1) main :: IO () main = do putStrLn "Введите число для вычисления факториала: " input <- getLine let xs = fac1 input let xy = fac2 input putStrLn $ "Вычисленный факториал 1 = " ++ show xs putStrLn $ "Вычисленный факториал 2 = " ++ show xy
a813bb384506b0726a7c3670d3b60b54
{ "intermediate": 0.3228982388973236, "beginner": 0.4013490378856659, "expert": 0.2757526636123657 }
37,177
correct please fac1 :: (Integral a) => a -> a fac1 n = product [1..n] -- Вариант 2: fac2 :: (Integral a) => a -> a fac2 0 = 1 fac2 n = n * fac2 (n - 1) main :: IO () main = do putStrLn "Введите число для вычисления факториала: " input <- getLine let xs = fac1 input let xy = fac2 input putStrLn $ "Вычисленный факториал 1 = " ++ show xs putStrLn $ "Вычисленный факториал 2 = " ++ show xy
9906c39e2f53d0188fcca36b5adcbea8
{ "intermediate": 0.2966090738773346, "beginner": 0.3909032642841339, "expert": 0.3124876618385315 }
37,178
-- Решение на Haskell задачи №3: -- Вычисление факториала на языке Haskell. -- По сути - это своего рода Haskell "Hello World!" -- Вариант 1: fac1 :: (Integral a) => a -> a fac1 n = product [1..n] -- Вариант 2: fac2 :: (Integral a) => a -> a fac2 0 = 1 fac2 n = n * fac2 (n - 1) main :: IO () main = do putStrLn "Введите число для вычисления факториала: " input <- getLine let xs = fac1 input putStrLn $ "Вычисленный факториал 1 = " ++ show xs let xy = fac2 input putStrLn $ "Вычисленный факториал 2 = " ++ show xy
8d8c4e3c3fe5dc28db66a8a10ebbae6a
{ "intermediate": 0.38487449288368225, "beginner": 0.40822291374206543, "expert": 0.2069026529788971 }
37,179
J'ai cette erreur : Fatal error: Uncaught Error: Cannot instantiate abstract class Form in C:\Users\amael\OneDrive\Bureau\cours\php\tp5\index.php:94 Stack trace: #0 {main} thrown in C:\Users\amael\OneDrive\Bureau\cours\php\tp5\index.php on line 94 j'essaie de lancer un projet php à l'aide de ce fichier : <html> <head> <title>Quizz</title> </head> <body> <?php require 'Classes/Quizz/Autoloader.php'; require_once 'Classes/Provider.php'; Autoloader::register(); $provider = new Provider('data/data.json'); $data = $provider->getData(); $question_total = 0; $question_correct = 0; $score_total = 0; $score_correct = 0; function question_text($q) { echo ($q["text"] . "<br><input type='text' name='$q[name]'><br>"); } function answer_text($q, $v) { global $question_correct, $score_total, $score_correct; $score_total += $q["score"]; if (is_null($v)) return; if ($q["answer"] == $v) { $question_correct += 1; $score_correct += $q["score"]; } } function question_radio($q) { $html = $q["text"] . "<br>"; $i = 0; foreach ($q["choices"] as $c) { $i += 1; $html .= "<input type='radio' name='$q[name]' value='$c[value]' id='$q[name]-$i'>"; $html .= "<label for='$q[name]-$i'>$c[text]</label>"; } echo $html; } function answer_radio($q, $v) { global $question_correct, $score_total, $score_correct; $score_total += $q["score"]; if (is_null($v)) return; if ($q["answer"] == $v) { $question_correct += 1; $score_correct += $q["score"]; } } function question_checkbox($q) { $html = $q["text"] . 
"<br>"; $i = 0; foreach ($q["choices"] as $c) { $i += 1; $html .= "<input type='checkbox' name='$q[name][]' value='$c[value]' id='$q[name]-$i'>"; $html .= "<label for='$q[name]-$i'>$c[text]</label>"; } echo $html; } function answer_checkbox($q, $v) { global $question_correct, $score_total, $score_correct; $score_total += $q["score"]; if (is_null($v)) return; $diff1 = array_diff($q["answer"], $v); $diff2 = array_diff($v, $q["answer"]); if (count($diff1) == 0 && count($diff2) == 0) { $question_correct += 1; $score_correct += $q["score"]; } } $question_handlers = array( "text" => "question_text", "radio" => "question_radio", "checkbox" => "question_checkbox" ); $answer_handlers = array( "text" => "answer_text", "radio" => "answer_radio", "checkbox" => "answer_checkbox" ); if ($_SERVER["REQUEST_METHOD"] == "GET") { require "Action/Form.php"; $form = new Form($data); $form->buildForm(); } else { echo "<br>Nombre de questions : $question_total<br>"; echo "Nombre de bonnes réponses : $question_correct<br>"; echo "Score total : $score_total<br>"; echo "Score obtenu : $score_correct<br>"; } ?> </body> </html>
7aae8e9354c75be4cf9d84e7296524d5
{ "intermediate": 0.275860458612442, "beginner": 0.6333644390106201, "expert": 0.09077508747577667 }
37,180
#madebyr import tkinter as tk from tkinter import filedialog, simpledialog, messagebox from PIL import Image, ImageTk import piexif import os import random import threading import time from queue import Queue import threading class ImageViewer: def __init__(self, root): self.SUPPORTED_EXTENSIONS = (".png", ".jpg", ".jpeg", ".gif", ".bmp", ".JPG", ".JPEG", ".BMP", ".GIF", ".webp", ".WEBP") self.button_style = {"font": ("consolas", 10), "fg": "white", "bg": "#3c3c3c", "relief": "flat"} self.image_loading_queue = Queue(maxsize=3) self.switch_timestamps = [] self.is_button_press_in_progress = False # Variable to track if button is pressed self.root = root self.root.geometry("800x600") # Initial window size self.root.title("Jestur Viewer") self.root.configure(bg="#808080") # Set window background color self.root.attributes("-topmost", True) # Make the window stay on top self.image_folder = "" self.image_files = [] self.current_image_index = 0 self.main_frame = tk.Frame(self.root, bg="#808080") self.main_frame.pack() self.select_folder_button = tk.Button(self.main_frame, text="Select Folder", command=self.select_folder) self.select_folder_button.configure(**self.button_style) self.select_folder_button.pack(side=tk.LEFT, padx=5, pady=10) self.set_timer_button = tk.Button(self.main_frame, text="Timer", command=self.set_timer_interval) self.set_timer_button.configure(**self.button_style) self.set_timer_button.pack(side=tk.LEFT, padx=5, pady=10) self.start_button = tk.Button(self.main_frame, text="Start", command=self.start_pause_slideshow) self.start_button.configure(font=self.button_style["font"], bg="#909090", fg="#707070", relief=tk.FLAT) self.start_button.pack(side=tk.LEFT, padx=5, pady=10) self.mirror_button = tk.Button(self.main_frame, text=">|<", command=self.toggle_mirror_image) self.mirror_button.configure(**self.button_style) self.mirror_button.pack(side=tk.LEFT, padx=5, pady=10) self.greyscale_button = tk.Button(self.main_frame, text="B/W", 
command=self.toggle_greyscale) self.greyscale_button.configure(**self.button_style) self.greyscale_button.pack(side=tk.LEFT, padx=5, pady=10) self.random_next_image_var = tk.BooleanVar(value=False) # Checkbox variable self.random_next_image_checkbox = tk.Checkbutton(self.main_frame, text="Random", variable=self.random_next_image_var, command=self.toggle_colors) self.random_next_image_checkbox.configure(**self.button_style, bd=2) self.random_next_image_checkbox.pack(side=tk.LEFT, padx=5, pady=10) #image label self.canvas = tk.Canvas(self.root) self.is_mirrored = False self.is_greyscale = False self.is_paused = False self.timer_label = tk.Label(self.root, text="5", **self.button_style, anchor="ne") #fg="white", bg="black", font=("arial", 12), bd=1, relief=tk.RIDGE) self.timer_label.configure(font=("consolas",17)) self.timer_label.place(relx=1, anchor="ne", x=-0, y=0) self.timer_interval = 5000 # Default time interval in milliseconds (5 seconds) self.set_timer_interval = 5000 # Store the set timer interval self.timer = None # Timer ID self.window_size = (self.root.winfo_width(), self.root.winfo_height()) self.resize_timer = None self.root.bind("<Right>", self.next_image) self.root.bind("<Left>", self.previous_image) self.root.bind("<space>", self.start_pause_slideshow) self.root.bind("<Configure>", self.update_image_size) self.root.bind("<KeyRelease-Left>", self.release_button_press) self.root.bind("<KeyRelease-Right>", self.release_button_press) def manage_button_press(self): self.is_button_press_in_progress = True if hasattr(self, "delayed_display_image_timer"): self.root.after_cancel(self.delayed_display_image_timer) def release_button_press(self, event=None): self.is_button_press_in_progress = False # Schedule the delayed display of the last intended image self.delayed_display_image_timer = self.root.after(100, self.display_current_image) def toggle_colors(self): if self.random_next_image_var.get(): self.random_next_image_checkbox.configure(fg="#2c2c2c", 
bg="#d8d8d8") else: self.random_next_image_checkbox.configure(**self.button_style) def select_folder(self): self.image_folder = filedialog.askdirectory() if self.image_folder: image_files = os.listdir(self.image_folder) # Get all files in the selected folder self.image_files = [file for file in image_files if file.endswith(self.SUPPORTED_EXTENSIONS)] # Filter image files if len(self.image_files) > 0: self.current_image_index = 0 self.canvas.pack(fill=tk.BOTH, expand=True) self.select_folder_button.pack(side=tk.LEFT, padx=5, pady=5) self.root.title("Jesturing in " + self.image_folder) self.canvas.config(bg="#808080", highlightthickness=0) self.display_image() self.start_button.config(**self.button_style) else: messagebox.showinfo("No Image Files", "The selected folder does not contain any image files.") self.image_folder = "" self.root.title("Jestur") def set_timer_interval(self): self.root.attributes("-topmost", False) interval = simpledialog.askinteger("Set Timer Interval", "How many seconds?") if interval: if interval < 1: # Check if interval is less than 1 second interval = 1 self.timer_interval = interval * 1000 # Convert to milliseconds self.set_timer_interval = self.timer_interval # Store the set timer interval self.root.lift() # Bring the main window to the top self.root.focus_force() # Give focus to the main window self.root.attributes("-topmost", True) def start_pause_slideshow(self, event=None): if self.image_folder == "": messagebox.showinfo("You haven't picked a folder", "Please select an image folder first.") else: if self.timer is None: # Check if timer is not running self.display_image() self.start_button.config(text="Pause") self.is_paused = False # Set paused flag to False self.start_timer() else: if self.is_paused: # Check if the slideshow is paused self.start_button.config(text="Pause") self.is_paused = False # Set paused flag to False self.start_timer() else: self.pause_timer() self.start_button.config(text="Start") self.is_paused = True # Set 
paused flag to True def pause_timer(self): if self.timer is not None: self.root.after_cancel(self.timer) self.timer = None def start_timer(self): if self.image_folder != "" and not self.is_paused: self.update_timer() self.timer = self.root.after(1000, self.start_timer) def update_timer(self): seconds_left = int(self.timer_interval / 1000) self.timer_label.config(text=f"{seconds_left}") self.timer_interval -= 1000 if self.timer_interval < 0: self.timer_interval = self.set_timer_interval # Use the stored set timer interval # Do not call next_image(); instead reset index and display the image if it is the time for a slide change if not self.random_next_image_var.get(): self.current_image_index = (self.current_image_index + 1) % len(self.image_files) else: self.current_image_index = random.randint(0, len(self.image_files) - 1) self.display_image() def toggle_mirror_image(self): self.is_mirrored = not self.is_mirrored self.display_image() def toggle_greyscale(self): self.is_greyscale = not self.is_greyscale self.display_image() def load_image(self, image_path): image = Image.open(image_path) # Check if the image has EXIF data if "exif" in image.info: exif_data = piexif.load(image.info["exif"]) if piexif.ImageIFD.Orientation in exif_data["0th"]: orientation = exif_data["0th"][piexif.ImageIFD.Orientation] if orientation == 3: image = image.rotate(180, expand=True) elif orientation == 6: image = image.rotate(-90, expand=True) elif orientation == 8: image = image.rotate(90, expand=True) if self.is_greyscale: image = image.convert("L") if self.is_mirrored: image = image.transpose(Image.FLIP_LEFT_RIGHT) aspect_ratio = image.width / image.height canvas_width = self.canvas.winfo_width() canvas_height = self.canvas.winfo_height() max_width = min(canvas_width, int(aspect_ratio * canvas_height)) max_height = min(canvas_height, int(canvas_width / aspect_ratio)) scale_factor = min(max_width / image.width, max_height / image.height) new_width = int(image.width * scale_factor) 
new_height = int(image.height * scale_factor) if new_width > 0 and new_height > 0: resized_image = image.resize((new_width, new_height), Image.BICUBIC) self.photo = ImageTk.PhotoImage(resized_image) self.canvas.delete("all") self.canvas.create_image(canvas_width // 2, canvas_height // 2, image=self.photo) def display_image(self): if self.image_folder != "" and self.image_files: if self.should_load_image(): # We should immediately load the image image_path = os.path.join(self.image_folder, self.image_files[self.current_image_index]) threading.Thread(target=self.load_image, args=(image_path,)).start() else: # Handling rapid browsing or long press by showing only image name image_name = self.image_files[self.current_image_index] self.show_image_name(image_name) def show_image_name(self, name): self.canvas.delete("all") self.canvas.create_text( self.canvas.winfo_width() // 2, self.canvas.winfo_height() // 2, text=name, fill="white" ) def update_image_size(self, event=None): current_width = self.root.winfo_width() current_height = self.root.winfo_height() if (current_width, current_height) != self.window_size: self.window_size = (current_width, current_height) self.canvas.config(width=current_width, height=current_height) self.buffer_forsize() def buffer_forsize(self, event=None): if self.resize_timer: self.root.after_cancel(self.resize_timer) self.resize_timer = self.root.after(500, self.display_image) def update_switch_timestamps(self): current_time = time.time() self.switch_timestamps.append(current_time) # Clean up old timestamps threshold_time = 1 self.switch_timestamps = [t for t in self.switch_timestamps if current_time - t <= threshold_time] rapid_browsing = len(self.switch_timestamps) > 6 # Adjust this threshold as needed if rapid_browsing: # Schedule the delayed display of the current image self.display_current_image_with_delay() self.switch_timestamps.clear() # Clear timestamps to reset rapid browsing detection return rapid_browsing def 
queue_image_loading(self): if self.image_loading_queue.full(): # Clear the queue if it"s full with self.image_loading_queue.mutex: self.image_loading_queue.queue.clear() self.image_loading_queue.put(self.current_image_index) self.process_image_loading_queue() def process_image_loading_queue(self): if not self.image_loading_queue.empty(): # Only process the last item in the queue while self.image_loading_queue.qsize() > 1: self.image_loading_queue.get() last_index = self.image_loading_queue.get() self.root.after_cancel(self.timer) if self.timer else None self.current_image_index = last_index threading.Thread(target=self.display_image).start() self.timer_interval = self.set_timer_interval def next_image(self, event=None): self.manage_button_press() if self.image_folder != "" and self.image_files: if self.should_load_image(): if self.random_next_image_var.get(): # Skip random selection if navigating through images too rapidly or holding the button too long self.current_image_index = random.randint(0, len(self.image_files) - 1) else: self.current_image_index = (self.current_image_index + 1) % len(self.image_files) self.display_image() def previous_image(self, event=None): self.manage_button_press() # Indicate start of button press if self.image_folder != "" and self.image_files: # Ensure we have images to handle if self.should_load_image(): if self.random_next_image_var.get(): # Skip random selection if navigating through images too rapidly or holding the button too long self.current_image_index = random.randint(0, len(self.image_files) - 1) else: self.current_image_index = (self.current_image_index - 1) % len(self.image_files) self.display_image() def display_current_image_with_delay(self): # Cancel any previous image display attempts if hasattr(self, "delayed_display_image_timer"): self.root.after_cancel(self.delayed_display_image_timer) # Schedule a new display attempt for the current image after the delay self.delayed_display_image_timer = self.root.after(300, 
self.display_current_image) def display_current_image(self): # Call the original method to load and display the current image self.display_image() def should_load_image(self): threshold_rapid_presses = 6 # after how many rapid presses to display the image name threshold_time = 1 # how long to consider presses as "rapid" in seconds current_time = time.time() self.switch_timestamps.append(current_time) self.switch_timestamps = [t for t in self.switch_timestamps if current_time - t <= threshold_time] # Reset timer interval self.timer_interval = self.set_timer_interval # Check if rapid or long pressing is occurring if len(self.switch_timestamps) > threshold_rapid_presses or self.is_button_press_in_progress: # Schedule the delayed display of the current image self.display_current_image_with_delay() return False return True if __name__ == "__main__": root = tk.Tk() image_viewer = ImageViewer(root) root.mainloop() on the next and previous button presses. it just load image name and stays there instead of loading the next image name or show image. it’s supposed to load and show images immediately on next and previous press, even when rapidly pressed or held, but only if it’s too much, it loads just the image name of the current image it’s supposed to be showing and browse that way until the key press is released, or when there’s not too much in queue. so the behaviour that i’m looking for is it loads images immediately on next and previous button press but if it’s too much on the queue or too rapid, it only show image name as i browse through snappy, then when i stop pressing at the current image, it refreshes and load the image as it’s supposed to. can you just give me the complete fixed code with that behaviour with a more neat arrangement and comment included on each method. i want the new and improved code entirely in full without any parts cut off so i can just replace the entire code from beginning to end instead of replacing parts of it.
ed7465c48ec224ffcaf2efc708383bda
{ "intermediate": 0.25808340311050415, "beginner": 0.5864339470863342, "expert": 0.1554826945066452 }
37,181
class ImageViewer: def __init__(self, root): self.SUPPORTED_EXTENSIONS = (".png", ".jpg", ".jpeg", ".gif", ".bmp", ".JPG", ".JPEG", ".BMP", ".GIF", ".webp", ".WEBP") self.button_style = {"font": ("consolas", 10), "fg": "white", "bg": "#3c3c3c", "relief": "flat"} self.image_loading_queue = Queue(maxsize=3) self.switch_timestamps = [] self.is_button_press_in_progress = False # Variable to track if button is pressed self.root = root self.root.geometry("800x600") # Initial window size self.root.title("Jestur Viewer") self.root.configure(bg="#808080") # Set window background color self.root.attributes("-topmost", True) # Make the window stay on top self.image_folder = "" self.image_files = [] self.current_image_index = 0 self.main_frame = tk.Frame(self.root, bg="#808080") self.main_frame.pack() self.select_folder_button = tk.Button(self.main_frame, text="Select Folder", command=self.select_folder) self.select_folder_button.configure(**self.button_style) self.select_folder_button.pack(side=tk.LEFT, padx=5, pady=10) self.set_timer_button = tk.Button(self.main_frame, text="Timer", command=self.set_timer_interval) self.set_timer_button.configure(**self.button_style) self.set_timer_button.pack(side=tk.LEFT, padx=5, pady=10) self.start_button = tk.Button(self.main_frame, text="Start", command=self.start_pause_slideshow) self.start_button.configure(font=self.button_style["font"], bg="#909090", fg="#707070", relief=tk.FLAT) self.start_button.pack(side=tk.LEFT, padx=5, pady=10) self.mirror_button = tk.Button(self.main_frame, text=">|<", command=self.toggle_mirror_image) self.mirror_button.configure(**self.button_style) self.mirror_button.pack(side=tk.LEFT, padx=5, pady=10) self.greyscale_button = tk.Button(self.main_frame, text="B/W", command=self.toggle_greyscale) self.greyscale_button.configure(**self.button_style) self.greyscale_button.pack(side=tk.LEFT, padx=5, pady=10) self.random_next_image_var = tk.BooleanVar(value=False) # Checkbox variable 
self.random_next_image_checkbox = tk.Checkbutton(self.main_frame, text="Random", variable=self.random_next_image_var, command=self.toggle_colors) self.random_next_image_checkbox.configure(**self.button_style, bd=2) self.random_next_image_checkbox.pack(side=tk.LEFT, padx=5, pady=10) #image label self.canvas = tk.Canvas(self.root) self.is_mirrored = False self.is_greyscale = False self.is_paused = False self.timer_label = tk.Label(self.root, text="5", **self.button_style, anchor="ne") #fg="white", bg="black", font=("arial", 12), bd=1, relief=tk.RIDGE) self.timer_label.configure(font=("consolas",17)) self.timer_label.place(relx=1, anchor="ne", x=-0, y=0) self.timer_interval = 5000 # Default time interval in milliseconds (5 seconds) self.set_timer_interval = 5000 # Store the set timer interval self.timer = None # Timer ID self.window_size = (self.root.winfo_width(), self.root.winfo_height()) self.resize_timer = None self.root.bind("<Right>", self.next_image) self.root.bind("<Left>", self.previous_image) self.root.bind("<space>", self.start_pause_slideshow) self.root.bind("<Configure>", self.update_image_size) self.root.bind("<KeyRelease-Left>", self.release_button_press) self.root.bind("<KeyRelease-Right>", self.release_button_press) def manage_button_press(self): self.is_button_press_in_progress = True if hasattr(self, "delayed_display_image_timer"): self.root.after_cancel(self.delayed_display_image_timer) def release_button_press(self, event=None): self.is_button_press_in_progress = False # Schedule the delayed display of the last intended image self.delayed_display_image_timer = self.root.after(100, self.display_current_image) def toggle_colors(self): if self.random_next_image_var.get(): self.random_next_image_checkbox.configure(fg="#2c2c2c", bg="#d8d8d8") else: self.random_next_image_checkbox.configure(**self.button_style) def select_folder(self): self.image_folder = filedialog.askdirectory() if self.image_folder: image_files = os.listdir(self.image_folder) # Get 
all files in the selected folder self.image_files = [file for file in image_files if file.endswith(self.SUPPORTED_EXTENSIONS)] # Filter image files if len(self.image_files) > 0: self.current_image_index = 0 self.canvas.pack(fill=tk.BOTH, expand=True) self.select_folder_button.pack(side=tk.LEFT, padx=5, pady=5) self.root.title("Jesturing in " + self.image_folder) self.canvas.config(bg="#808080", highlightthickness=0) self.display_image() self.start_button.config(**self.button_style) else: messagebox.showinfo("No Image Files", "The selected folder does not contain any image files.") self.image_folder = "" self.root.title("Jestur") def set_timer_interval(self): self.root.attributes("-topmost", False) interval = simpledialog.askinteger("Set Timer Interval", "How many seconds?") if interval: if interval < 1: # Check if interval is less than 1 second interval = 1 self.timer_interval = interval * 1000 # Convert to milliseconds self.set_timer_interval = self.timer_interval # Store the set timer interval self.root.lift() # Bring the main window to the top self.root.focus_force() # Give focus to the main window self.root.attributes("-topmost", True) def start_pause_slideshow(self, event=None): if self.image_folder == "": messagebox.showinfo("You haven't picked a folder", "Please select an image folder first.") else: if self.timer is None: # Check if timer is not running self.display_image() self.start_button.config(text="Pause") self.is_paused = False # Set paused flag to False self.start_timer() else: if self.is_paused: # Check if the slideshow is paused self.start_button.config(text="Pause") self.is_paused = False # Set paused flag to False self.start_timer() else: self.pause_timer() self.start_button.config(text="Start") self.is_paused = True # Set paused flag to True def pause_timer(self): if self.timer is not None: self.root.after_cancel(self.timer) self.timer = None def start_timer(self): if self.image_folder != "" and not self.is_paused: self.update_timer() self.timer 
= self.root.after(1000, self.start_timer) def update_timer(self): seconds_left = int(self.timer_interval / 1000) self.timer_label.config(text=f"{seconds_left}") self.timer_interval -= 1000 if self.timer_interval < 0: self.timer_interval = self.set_timer_interval # Use the stored set timer interval # Do not call next_image(); instead reset index and display the image if it is the time for a slide change if not self.random_next_image_var.get(): self.current_image_index = (self.current_image_index + 1) % len(self.image_files) else: self.current_image_index = random.randint(0, len(self.image_files) - 1) self.display_image() def toggle_mirror_image(self): self.is_mirrored = not self.is_mirrored self.display_image() def toggle_greyscale(self): self.is_greyscale = not self.is_greyscale self.display_image() def load_image(self, image_path): image = Image.open(image_path) # Check if the image has EXIF data if "exif" in image.info: exif_data = piexif.load(image.info["exif"]) if piexif.ImageIFD.Orientation in exif_data["0th"]: orientation = exif_data["0th"][piexif.ImageIFD.Orientation] if orientation == 3: image = image.rotate(180, expand=True) elif orientation == 6: image = image.rotate(-90, expand=True) elif orientation == 8: image = image.rotate(90, expand=True) if self.is_greyscale: image = image.convert("L") if self.is_mirrored: image = image.transpose(Image.FLIP_LEFT_RIGHT) aspect_ratio = image.width / image.height canvas_width = self.canvas.winfo_width() canvas_height = self.canvas.winfo_height() max_width = min(canvas_width, int(aspect_ratio * canvas_height)) max_height = min(canvas_height, int(canvas_width / aspect_ratio)) scale_factor = min(max_width / image.width, max_height / image.height) new_width = int(image.width * scale_factor) new_height = int(image.height * scale_factor) if new_width > 0 and new_height > 0: resized_image = image.resize((new_width, new_height), Image.BICUBIC) self.photo = ImageTk.PhotoImage(resized_image) self.canvas.delete("all") 
self.canvas.create_image(canvas_width // 2, canvas_height // 2, image=self.photo) def display_image(self): if self.image_folder != "" and self.image_files: if self.should_load_image(): # We should immediately load the image image_path = os.path.join(self.image_folder, self.image_files[self.current_image_index]) threading.Thread(target=self.load_image, args=(image_path,)).start() else: # Handling rapid browsing or long press by showing only image name image_name = self.image_files[self.current_image_index] self.show_image_name(image_name) def show_image_name(self, name): self.canvas.delete("all") self.canvas.create_text( self.canvas.winfo_width() // 2, self.canvas.winfo_height() // 2, text=name, fill="white" ) def update_image_size(self, event=None): current_width = self.root.winfo_width() current_height = self.root.winfo_height() if (current_width, current_height) != self.window_size: self.window_size = (current_width, current_height) self.canvas.config(width=current_width, height=current_height) self.buffer_forsize() def buffer_forsize(self, event=None): if self.resize_timer: self.root.after_cancel(self.resize_timer) self.resize_timer = self.root.after(500, self.display_image) def update_switch_timestamps(self): current_time = time.time() self.switch_timestamps.append(current_time) # Clean up old timestamps threshold_time = 1 self.switch_timestamps = [t for t in self.switch_timestamps if current_time - t <= threshold_time] rapid_browsing = len(self.switch_timestamps) > 6 # Adjust this threshold as needed if rapid_browsing: # Schedule the delayed display of the current image self.display_current_image_with_delay() self.switch_timestamps.clear() # Clear timestamps to reset rapid browsing detection return rapid_browsing def queue_image_loading(self): if self.image_loading_queue.full(): # Clear the queue if it"s full with self.image_loading_queue.mutex: self.image_loading_queue.queue.clear() self.image_loading_queue.put(self.current_image_index) 
self.process_image_loading_queue() def process_image_loading_queue(self): if not self.image_loading_queue.empty(): # Only process the last item in the queue while self.image_loading_queue.qsize() > 1: self.image_loading_queue.get() last_index = self.image_loading_queue.get() self.root.after_cancel(self.timer) if self.timer else None self.current_image_index = last_index threading.Thread(target=self.display_image).start() self.timer_interval = self.set_timer_interval def next_image(self, event=None): self.manage_button_press() if self.image_folder != "" and self.image_files: if self.should_load_image(): if self.random_next_image_var.get(): # Skip random selection if navigating through images too rapidly or holding the button too long self.current_image_index = random.randint(0, len(self.image_files) - 1) else: self.current_image_index = (self.current_image_index + 1) % len(self.image_files) self.display_image() def previous_image(self, event=None): self.manage_button_press() # Indicate start of button press if self.image_folder != "" and self.image_files: # Ensure we have images to handle if self.should_load_image(): if self.random_next_image_var.get(): # Skip random selection if navigating through images too rapidly or holding the button too long self.current_image_index = random.randint(0, len(self.image_files) - 1) else: self.current_image_index = (self.current_image_index - 1) % len(self.image_files) self.display_image() def display_current_image_with_delay(self): # Cancel any previous image display attempts if hasattr(self, "delayed_display_image_timer"): self.root.after_cancel(self.delayed_display_image_timer) # Schedule a new display attempt for the current image after the delay self.delayed_display_image_timer = self.root.after(300, self.display_current_image) def display_current_image(self): # Call the original method to load and display the current image self.display_image() def should_load_image(self): threshold_rapid_presses = 6 # after how many rapid 
presses to display the image name threshold_time = 0.5 # how long to consider presses as "rapid" in seconds current_time = time.time() self.switch_timestamps.append(current_time) self.switch_timestamps = [t for t in self.switch_timestamps if current_time - t <= threshold_time] # Reset timer interval self.timer_interval = self.set_timer_interval # Check if rapid or long pressing is occurring if len(self.switch_timestamps) > threshold_rapid_presses or self.is_button_press_in_progress: # Schedule the delayed display of the current image self.display_current_image_with_delay() return False return True the next and previous button is broken, can you make it so it loads next and previous images, but only show the image name in text when it's too much on the queue, pressed too long or too rapid. i want to still browse by long press, and rapid press. but only if it's too fast or too much images to be processed it just shows the image name temporarily while i can keep browsing pressing next or previous
f0f6d36ab1a029f96dbd67e7aadb7ed3
{ "intermediate": 0.2645386755466461, "beginner": 0.5725326538085938, "expert": 0.16292870044708252 }
37,182
class ImageViewer: def __init__(self, root): self.SUPPORTED_EXTENSIONS = (".png", ".jpg", ".jpeg", ".gif", ".bmp", ".JPG", ".JPEG", ".BMP", ".GIF", ".webp", ".WEBP") self.button_style = {"font": ("consolas", 10), "fg": "white", "bg": "#3c3c3c", "relief": "flat"} self.image_loading_queue = Queue(maxsize=3) self.switch_timestamps = [] self.root = root self.root.geometry("800x600") # Initial window size self.root.title("Jestur Viewer") self.root.configure(bg="#808080") # Set window background color self.root.attributes("-topmost", True) # Make the window stay on top self.image_folder = "" self.image_files = [] self.current_image_index = 0 self.main_frame = tk.Frame(self.root, bg="#808080") self.main_frame.pack() self.select_folder_button = tk.Button(self.main_frame, text="Select Folder", command=self.select_folder) self.select_folder_button.configure(**self.button_style) self.select_folder_button.pack(side=tk.LEFT, padx=5, pady=10) self.set_timer_button = tk.Button(self.main_frame, text="Timer", command=self.set_timer_interval) self.set_timer_button.configure(**self.button_style) self.set_timer_button.pack(side=tk.LEFT, padx=5, pady=10) self.start_button = tk.Button(self.main_frame, text="Start", command=self.start_pause_slideshow) self.start_button.configure(font=self.button_style["font"], bg="#909090", fg="#707070", relief=tk.FLAT) self.start_button.pack(side=tk.LEFT, padx=5, pady=10) self.mirror_button = tk.Button(self.main_frame, text=">|<", command=self.toggle_mirror_image) self.mirror_button.configure(**self.button_style) self.mirror_button.pack(side=tk.LEFT, padx=5, pady=10) self.greyscale_button = tk.Button(self.main_frame, text="B/W", command=self.toggle_greyscale) self.greyscale_button.configure(**self.button_style) self.greyscale_button.pack(side=tk.LEFT, padx=5, pady=10) self.random_next_image_var = tk.BooleanVar(value=False) # Checkbox variable self.random_next_image_checkbox = tk.Checkbutton(self.main_frame, text="Random", 
variable=self.random_next_image_var, command=self.toggle_colors) self.random_next_image_checkbox.configure(**self.button_style, bd=2) self.random_next_image_checkbox.pack(side=tk.LEFT, padx=5, pady=10) #image label self.canvas = tk.Canvas(self.root) self.is_mirrored = False self.is_greyscale = False self.is_paused = False self.timer_label = tk.Label(self.root, text="5", **self.button_style, anchor="ne") #fg="white", bg="black", font=("arial", 12), bd=1, relief=tk.RIDGE) self.timer_label.configure(font=("consolas",17)) self.timer_label.place(relx=1, anchor="ne", x=-0, y=0) self.timer_interval = 5000 # Default time interval in milliseconds (5 seconds) self.set_timer_interval = 5000 # Store the set timer interval self.timer = None # Timer ID self.window_size = (self.root.winfo_width(), self.root.winfo_height()) self.resize_timer = None self.root.bind("<Right>", self.next_image) self.root.bind("<Left>", self.previous_image) self.root.bind("<space>", self.start_pause_slideshow) self.root.bind("<Configure>", self.update_image_size) def toggle_colors(self): if self.random_next_image_var.get(): self.random_next_image_checkbox.configure(fg="#2c2c2c", bg="#d8d8d8") else: self.random_next_image_checkbox.configure(**self.button_style) def select_folder(self): self.image_folder = filedialog.askdirectory() if self.image_folder: image_files = os.listdir(self.image_folder) # Get all files in the selected folder self.image_files = [file for file in image_files if file.endswith(self.SUPPORTED_EXTENSIONS)] # Filter image files if len(self.image_files) > 0: self.current_image_index = 0 self.canvas.pack(fill=tk.BOTH, expand=True) self.select_folder_button.pack(side=tk.LEFT, padx=5, pady=5) self.root.title("Jesturing in " + self.image_folder) self.canvas.config(bg="#808080", highlightthickness=0) self.display_image() self.start_button.config(**self.button_style) else: messagebox.showinfo("No Image Files", "The selected folder does not contain any image files.") self.image_folder = "" 
self.root.title("Jestur") def set_timer_interval(self): self.root.attributes("-topmost", False) interval = simpledialog.askinteger("Set Timer Interval", "How many seconds?") if interval: if interval < 1: # Check if interval is less than 1 second interval = 1 self.timer_interval = interval * 1000 # Convert to milliseconds self.set_timer_interval = self.timer_interval # Store the set timer interval self.root.lift() # Bring the main window to the top self.root.focus_force() # Give focus to the main window self.root.attributes("-topmost", True) def start_pause_slideshow(self, event=None): if self.image_folder == "": messagebox.showinfo("You haven't picked a folder", "Please select an image folder first.") else: if self.timer is None: # Check if timer is not running self.display_image() self.start_button.config(text="Pause") self.is_paused = False # Set paused flag to False self.start_timer() else: if self.is_paused: # Check if the slideshow is paused self.start_button.config(text="Pause") self.is_paused = False # Set paused flag to False self.start_timer() else: self.pause_timer() self.start_button.config(text="Start") self.is_paused = True # Set paused flag to True def pause_timer(self): if self.timer is not None: self.root.after_cancel(self.timer) self.timer = None def start_timer(self): if self.image_folder != "" and not self.is_paused: self.update_timer() self.timer = self.root.after(1000, self.start_timer) def update_timer(self): seconds_left = int(self.timer_interval / 1000) self.timer_label.config(text=f"{seconds_left}") self.timer_interval -= 1000 if self.timer_interval < 0: self.timer_interval = self.set_timer_interval # Use the stored set timer interval self.next_image() def toggle_mirror_image(self): self.is_mirrored = not self.is_mirrored self.display_image() def toggle_greyscale(self): self.is_greyscale = not self.is_greyscale self.display_image() def load_image(self, image_path): image = Image.open(image_path) # Check if the image has EXIF data if "exif" 
in image.info: exif_data = piexif.load(image.info["exif"]) if piexif.ImageIFD.Orientation in exif_data["0th"]: orientation = exif_data["0th"][piexif.ImageIFD.Orientation] if orientation == 3: image = image.rotate(180, expand=True) elif orientation == 6: image = image.rotate(-90, expand=True) elif orientation == 8: image = image.rotate(90, expand=True) if self.is_greyscale: image = image.convert("L") if self.is_mirrored: image = image.transpose(Image.FLIP_LEFT_RIGHT) aspect_ratio = image.width / image.height canvas_width = self.canvas.winfo_width() canvas_height = self.canvas.winfo_height() max_width = min(canvas_width, int(aspect_ratio * canvas_height)) max_height = min(canvas_height, int(canvas_width / aspect_ratio)) scale_factor = min(max_width / image.width, max_height / image.height) new_width = int(image.width * scale_factor) new_height = int(image.height * scale_factor) if new_width > 0 and new_height > 0: resized_image = image.resize((new_width, new_height), Image.BICUBIC) self.photo = ImageTk.PhotoImage(resized_image) self.canvas.delete("all") self.canvas.create_image(canvas_width // 2, canvas_height // 2, image=self.photo) def display_image(self): if self.image_folder != "" and len(self.image_files) > 0: if self.update_switch_timestamps(): # Show the text of the image name instead of loading the actual image image_name = self.image_files[self.current_image_index] self.canvas.delete("all") # Clear the canvas self.canvas.create_text( self.canvas.winfo_width() // 2, self.canvas.winfo_height() // 2, text=image_name, fill="white" ) else: # Process the queue and load the image normally if not quick-switching image_path = os.path.join(self.image_folder, self.image_files[self.current_image_index]) threading.Thread(target=self.load_image, args=(image_path,)).start() def update_image_size(self, event=None): current_width = self.root.winfo_width() current_height = self.root.winfo_height() if (current_width, current_height) != self.window_size: self.window_size = 
(current_width, current_height) self.canvas.config(width=current_width, height=current_height) self.buffer_forsize() def buffer_forsize(self, event=None): if self.resize_timer: self.root.after_cancel(self.resize_timer) self.resize_timer = self.root.after(500, self.display_image) def update_switch_timestamps(self): current_time = time.time() self.switch_timestamps.append(current_time) # Clean up old timestamps threshold_time = 1 self.switch_timestamps = [t for t in self.switch_timestamps if current_time - t <= threshold_time] rapid_browsing = len(self.switch_timestamps) > 6 # Adjust this threshold as needed if rapid_browsing: # Schedule the delayed display of the current image self.display_current_image_with_delay() self.switch_timestamps.clear() # Clear timestamps to reset rapid browsing detection return rapid_browsing def queue_image_loading(self): if self.image_loading_queue.full(): # Clear the queue if it"s full with self.image_loading_queue.mutex: self.image_loading_queue.queue.clear() self.image_loading_queue.put(self.current_image_index) self.process_image_loading_queue() def process_image_loading_queue(self): if not self.image_loading_queue.empty(): # Only process the last item in the queue while self.image_loading_queue.qsize() > 1: self.image_loading_queue.get() last_index = self.image_loading_queue.get() self.root.after_cancel(self.timer) if self.timer else None self.current_image_index = last_index threading.Thread(target=self.display_image).start() self.timer_interval = self.set_timer_interval def next_image(self, event=None): if self.image_folder != "" and not self.update_switch_timestamps(): self.current_image_index = (self.current_image_index + 1) % len(self.image_files) self.display_image() self.timer_interval = self.set_timer_interval # Reset the timer interval def previous_image(self, event=None): if self.image_folder != "" and not self.update_switch_timestamps(): self.current_image_index = (self.current_image_index - 1) % len(self.image_files) 
self.display_image() self.timer_interval = self.set_timer_interval # Reset the timer interval def display_current_image_with_delay(self): # Cancel any previous image display attempts if hasattr(self, "delayed_display_image_timer"): self.root.after_cancel(self.delayed_display_image_timer) # Schedule a new display attempt for the current image after the delay self.delayed_display_image_timer = self.root.after(300, self.display_current_image) def display_current_image(self): # Call the original method to load and display the current image self.display_image() can you fix the random functionality and give me the fixed next and previous method
745021cc9bac1d56862636f12e7e4c05
{ "intermediate": 0.24868446588516235, "beginner": 0.5101708173751831, "expert": 0.24114468693733215 }
37,183
can you speak polish
5c64b19b4ad73886b0933618e95d0afe
{ "intermediate": 0.4182800352573395, "beginner": 0.30436035990715027, "expert": 0.27735957503318787 }
37,184
class ImageViewer: def __init__(self, root): self.SUPPORTED_EXTENSIONS = (".png", ".jpg", ".jpeg", ".gif", ".bmp", ".JPG", ".JPEG", ".BMP", ".GIF", ".webp", ".WEBP") self.button_style = {"font": ("consolas", 10), "fg": "white", "bg": "#3c3c3c", "relief": "flat"} self.image_loading_queue = Queue(maxsize=5) self.switch_timestamps = [] self.root = root self.root.geometry("800x600") # Initial window size self.root.title("Jestur Viewer") self.root.configure(bg="#808080") # Set window background color self.root.attributes("-topmost", True) # Make the window stay on top self.image_folder = "" self.image_files = [] self.current_image_index = 0 self.main_frame = tk.Frame(self.root, bg="#808080") self.main_frame.pack() self.select_folder_button = tk.Button(self.main_frame, text="Select Folder", command=self.select_folder) self.select_folder_button.configure(**self.button_style) self.select_folder_button.pack(side=tk.LEFT, padx=5, pady=10) self.set_timer_button = tk.Button(self.main_frame, text="Timer", command=self.set_timer_interval) self.set_timer_button.configure(**self.button_style) self.set_timer_button.pack(side=tk.LEFT, padx=5, pady=10) self.start_button = tk.Button(self.main_frame, text="Start", command=self.start_pause_slideshow) self.start_button.configure(font=self.button_style["font"], bg="#808080", fg="#808080", relief=tk.FLAT) self.start_button.pack(side=tk.LEFT, padx=5, pady=10) self.mirror_button = tk.Button(self.main_frame, text=">|<", command=self.toggle_mirror_image) self.mirror_button.configure(**self.button_style) self.mirror_button.pack(side=tk.LEFT, padx=5, pady=10) self.greyscale_button = tk.Button(self.main_frame, text="B/W", command=self.toggle_greyscale) self.greyscale_button.configure(**self.button_style) self.greyscale_button.pack(side=tk.LEFT, padx=5, pady=10) self.random_next_image_var = tk.BooleanVar(value=False) # Checkbox variable self.random_next_image_checkbox = tk.Checkbutton(self.main_frame, text="Random", 
variable=self.random_next_image_var, command=self.toggle_colors) self.random_next_image_checkbox.configure(**self.button_style, bd=2) self.random_next_image_checkbox.pack(side=tk.LEFT, padx=5, pady=10) #image label self.canvas = tk.Canvas(self.root) self.is_mirrored = False self.is_greyscale = False self.is_paused = False self.timer_label = tk.Label(self.root, text="5", **self.button_style, anchor="ne") #fg="white", bg="black", font=("arial", 12), bd=1, relief=tk.RIDGE) self.timer_label.configure(font=("consolas",17)) self.timer_label.place(relx=1, anchor="ne", x=-0, y=0) self.timer_interval = 5000 # Default time interval in milliseconds (5 seconds) self.set_timer_interval = 5000 # Store the set timer interval self.timer = None # Timer ID self.window_size = (self.root.winfo_width(), self.root.winfo_height()) self.resize_timer = None self.root.bind("<Right>", self.next_image) self.root.bind("<Left>", self.previous_image) self.root.bind("<space>", self.start_pause_slideshow) self.root.bind("<Configure>", self.update_image_size) def toggle_colors(self): if self.random_next_image_var.get(): self.random_next_image_checkbox.configure(fg="#2c2c2c", bg="#e8e8e8") else: self.random_next_image_checkbox.configure(**self.button_style) def select_folder(self): self.image_folder = filedialog.askdirectory() if self.image_folder: image_files = os.listdir(self.image_folder) # Get all files in the selected folder self.image_files = [file for file in image_files if file.endswith(self.SUPPORTED_EXTENSIONS)] # Filter image files if len(self.image_files) > 0: self.current_image_index = 0 self.canvas.pack(fill=tk.BOTH, expand=True) self.select_folder_button.pack(side=tk.LEFT, padx=5, pady=5) self.root.title("Jesturing in " + self.image_folder) self.canvas.config(bg="#808080", highlightthickness=0) self.display_image() self.start_button.config(**self.button_style) else: messagebox.showinfo("No Image Files", "The selected folder does not contain any image files.") self.image_folder = "" 
self.root.title("Jestur") def set_timer_interval(self): self.root.attributes("-topmost", False) interval = simpledialog.askinteger("Set Timer Interval", "How many seconds?") if interval: if interval < 1: # Check if interval is less than 1 second interval = 1 self.timer_interval = interval * 1000 # Convert to milliseconds self.set_timer_interval = self.timer_interval # Store the set timer interval self.root.lift() # Bring the main window to the top self.root.focus_force() # Give focus to the main window self.root.attributes("-topmost", True) def start_pause_slideshow(self, event=None): if self.image_folder == "": messagebox.showinfo("You haven't picked a folder", "Please select an image folder first.") else: if self.timer is None: # Check if timer is not running self.display_image() self.start_button.config(text="Pause") self.is_paused = False # Set paused flag to False self.start_timer() else: if self.is_paused: # Check if the slideshow is paused self.start_button.config(text="Pause") self.is_paused = False # Set paused flag to False self.start_timer() else: self.pause_timer() self.start_button.config(text="Start") self.is_paused = True # Set paused flag to True def pause_timer(self): if self.timer is not None: self.root.after_cancel(self.timer) self.timer = None def start_timer(self): if self.image_folder != "" and not self.is_paused: self.update_timer() self.timer = self.root.after(1000, self.start_timer) def update_timer(self): seconds_left = int(self.timer_interval / 1000) self.timer_label.config(text=f"{seconds_left}") self.timer_interval -= 1000 if self.timer_interval < 0: self.timer_interval = self.set_timer_interval # Use the stored set timer interval self.next_image() def toggle_mirror_image(self): self.is_mirrored = not self.is_mirrored self.display_image() def toggle_greyscale(self): self.is_greyscale = not self.is_greyscale self.display_image() def load_image(self, image_path): image = Image.open(image_path) # Check if the image has EXIF data if "exif" 
in image.info: exif_data = piexif.load(image.info["exif"]) if piexif.ImageIFD.Orientation in exif_data["0th"]: orientation = exif_data["0th"][piexif.ImageIFD.Orientation] if orientation == 3: image = image.rotate(180, expand=True) elif orientation == 6: image = image.rotate(-90, expand=True) elif orientation == 8: image = image.rotate(90, expand=True) if self.is_greyscale: image = image.convert("L") if self.is_mirrored: image = image.transpose(Image.FLIP_LEFT_RIGHT) aspect_ratio = image.width / image.height canvas_width = self.canvas.winfo_width() canvas_height = self.canvas.winfo_height() max_width = min(canvas_width, int(aspect_ratio * canvas_height)) max_height = min(canvas_height, int(canvas_width / aspect_ratio)) scale_factor = min(max_width / image.width, max_height / image.height) new_width = int(image.width * scale_factor) new_height = int(image.height * scale_factor) if new_width > 0 and new_height > 0: resized_image = image.resize((new_width, new_height), Image.BICUBIC) self.photo = ImageTk.PhotoImage(resized_image) self.canvas.delete("all") self.canvas.create_image(canvas_width // 2, canvas_height // 2, image=self.photo) def display_image(self): if self.image_folder != "" and len(self.image_files) > 0: if self.update_switch_timestamps(): # Show the text of the image name instead of loading the actual image image_name = self.image_files[self.current_image_index] self.canvas.delete("all") # Clear the canvas self.canvas.create_text( self.canvas.winfo_width() // 2, self.canvas.winfo_height() // 2, text=image_name, fill="white" ) else: # Process the queue and load the image normally if not quick-switching image_path = os.path.join(self.image_folder, self.image_files[self.current_image_index]) threading.Thread(target=self.load_image, args=(image_path,)).start() def update_image_size(self, event=None): current_width = self.root.winfo_width() current_height = self.root.winfo_height() if (current_width, current_height) != self.window_size: self.window_size = 
(current_width, current_height) self.canvas.config(width=current_width, height=current_height) self.buffer_forsize() def buffer_forsize(self, event=None): if self.resize_timer: self.root.after_cancel(self.resize_timer) self.resize_timer = self.root.after(500, self.display_image) def update_switch_timestamps(self): current_time = time.time() self.switch_timestamps.append(current_time) self.switch_timestamps = [t for t in self.switch_timestamps if current_time - t <= 1] if len(self.switch_timestamps) > 10: return True # Too many updates in a short time period return False def queue_image_loading(self): if self.image_loading_queue.full(): # Clear the queue if it’s full with self.image_loading_queue.mutex: self.image_loading_queue.queue.clear() self.image_loading_queue.put(self.current_image_index) self.process_image_loading_queue() def process_image_loading_queue(self): if not self.image_loading_queue.empty(): # Only process the last item in the queue while self.image_loading_queue.qsize() > 1: self.image_loading_queue.get() last_index = self.image_loading_queue.get() self.root.after_cancel(self.timer) if self.timer else None self.current_image_index = last_index threading.Thread(target=self.display_image).start() self.timer_interval = self.set_timer_interval def next_image(self, event=None): if self.image_folder != "" and not self.update_switch_timestamps(): # Update index as before if self.random_next_image_var.get(): # Check if random next image is selected self.current_image_index = random.randint(0, len(self.image_files) - 1) # Randomly select the next image index else: self.current_image_index = (self.current_image_index + 1) % len(self.image_files) # Linearly select the next image index threading.Thread(target=self.queue_image_loading()).start() # Load and display the image in a separate thread self.timer_interval = self.set_timer_interval # Use the stored set timer interval # Call the new queuing function instead of direct load def previous_image(self, 
event=None): if self.image_folder != "" and not self.update_switch_timestamps(): # Check if rapid browsing update is needed if self.random_next_image_var.get(): # If True, select a random image self.current_image_index = random.randint(0, len(self.image_files) - 1) else: # Otherwise, move to the previous image in the list self.current_image_index = (self.current_image_index - 1) % len(self.image_files) threading.Thread(target=self.queue_image_loading()).start() # Load and display the image in a separate thread self.timer_interval = self.set_timer_interval # Reset the timer interval when i press next or previous while the timer is running it stops the timer. how to fix it so that the timer just resets on a new image when pressing next or previous while the slideshow has started
974d8a93a084be11f1d244b65e8681e0
{ "intermediate": 0.2637064754962921, "beginner": 0.5606353282928467, "expert": 0.1756581813097 }
37,185
Add iteration for s in subnetworks_enm2 inside output2
a57757d48b77ceffb3a15650df1c4bdb
{ "intermediate": 0.30005884170532227, "beginner": 0.2522086203098297, "expert": 0.44773250818252563 }
37,186
Correct iteration for s in subnetworks_enm2. It should iterate for each baseline for each subnetwork
200f2225764b4687656a3cfa3c7b073c
{ "intermediate": 0.29876774549484253, "beginner": 0.2435874491930008, "expert": 0.4576447904109955 }
37,187
def select_folder(self): self.image_folder = filedialog.askdirectory() if self.image_folder: image_files = os.listdir(self.image_folder) # Get all files in the selected folder self.image_files = [file for file in image_files if file.endswith(self.SUPPORTED_EXTENSIONS)] # Filter image files if len(self.image_files) > 0: self.current_image_index = 0 self.canvas.pack(fill=tk.BOTH, expand=True) self.select_folder_button.pack(side=tk.LEFT, padx=5, pady=5) self.root.title("Jesturing in " + self.image_folder) self.canvas.config(bg="#808080", highlightthickness=0) self.display_image() self.start_button.config(**self.button_style) else: messagebox.showinfo("No Image Files", "The selected folder does not contain any image files.") self.image_folder = "" self.root.title("Jestur") def set_timer_interval(self): self.root.attributes("-topmost", False) interval = simpledialog.askinteger("Set Timer Interval", "How many seconds?") if interval: if interval < 1: # Check if interval is less than 1 second interval = 1 self.timer_interval = interval * 1000 # Convert to milliseconds self.set_timer_interval = self.timer_interval # Store the set timer interval self.root.lift() # Bring the main window to the top self.root.focus_force() # Give focus to the main window self.root.attributes("-topmost", True) def start_pause_slideshow(self, event=None): if self.image_folder == "": messagebox.showinfo("You haven't picked a folder", "Please select an image folder first.") else: if self.timer is None: # Check if timer is not running self.display_image() self.start_button.config(text="Pause") self.is_paused = False # Set paused flag to False self.start_timer() else: if self.is_paused: # Check if the slideshow is paused self.start_button.config(text="Pause") self.is_paused = False # Set paused flag to False self.start_timer() else: self.pause_timer() self.start_button.config(text="Start") self.is_paused = True # Set paused flag to True def pause_timer(self): if self.timer is not None: 
self.root.after_cancel(self.timer) self.timer = None def start_timer(self): if self.image_folder != "" and not self.is_paused: self.update_timer() self.timer = self.root.after(1000, self.start_timer) def update_timer(self): seconds_left = int(self.timer_interval / 1000) self.timer_label.config(text=f"{seconds_left}") self.timer_interval -= 1000 if self.timer_interval < 0: self.timer_interval = self.set_timer_interval # Use the stored set timer interval self.next_image() def display_image(self): if self.image_folder != "" and len(self.image_files) > 0: if self.update_switch_timestamps(): # Show the text of the image name instead of loading the actual image image_name = self.image_files[self.current_image_index] self.canvas.delete("all") # Clear the canvas self.canvas.create_text( self.canvas.winfo_width() // 2, self.canvas.winfo_height() // 2, text=image_name, fill="white" ) else: # Process the queue and load the image normally if not quick-switching image_path = os.path.join(self.image_folder, self.image_files[self.current_image_index]) threading.Thread(target=self.load_image, args=(image_path,)).start() def update_switch_timestamps(self): current_time = time.time() self.switch_timestamps.append(current_time) self.switch_timestamps = [t for t in self.switch_timestamps if current_time - t <= 1] if len(self.switch_timestamps) > 10: return True # Too many updates in a short time period return False def queue_image_loading(self): if self.image_loading_queue.full(): # Clear the queue if it’s full with self.image_loading_queue.mutex: self.image_loading_queue.queue.clear() self.image_loading_queue.put(self.current_image_index) self.process_image_loading_queue() def process_image_loading_queue(self): if not self.image_loading_queue.empty(): # Only process the last item in the queue while self.image_loading_queue.qsize() > 1: self.image_loading_queue.get() last_index = self.image_loading_queue.get() self.root.after_cancel(self.timer) if self.timer else None 
self.current_image_index = last_index threading.Thread(target=self.display_image).start() self.timer_interval = self.set_timer_interval def next_image(self, event=None): if self.image_folder != "" and not self.update_switch_timestamps(): # Update index as before if self.random_next_image_var.get(): # Check if random next image is selected self.current_image_index = random.randint(0, len(self.image_files) - 1) # Randomly select the next image index else: self.current_image_index = (self.current_image_index + 1) % len(self.image_files) # Linearly select the next image index target=self.queue_image_loading() # Call the queuing function instead of direct load self.timer_interval = self.set_timer_interval # Use the stored set timer interval def previous_image(self, event=None): if self.image_folder != "" and not self.update_switch_timestamps(): # Check if rapid browsing update is needed if self.random_next_image_var.get(): # If True, select a random image self.current_image_index = random.randint(0, len(self.image_files) - 1) else: # Otherwise, move to the previous image in the list self.current_image_index = (self.current_image_index - 1) % len(self.image_files) target=self.queue_image_loading() # Call the queuing function instead of direct load self.timer_interval = self.set_timer_interval # Reset the timer interval after rapid press it turns into image name only. can you make it so that in 500ms after i stop the button press it process and loads current image it's supposed to be showing automatically so i don't have to refresh it manually. but only if there's no button press in that 500ms
282394c209b9dd930bb527664f117b56
{ "intermediate": 0.24524745345115662, "beginner": 0.6264713406562805, "expert": 0.12828117609024048 }
37,188
how to setup Named Pipes for communication between Revit and RL model
33394088bd264d969164faea202e7be9
{ "intermediate": 0.19382771849632263, "beginner": 0.10288937389850616, "expert": 0.70328289270401 }
37,189
how to setup Named Pipes for communication between Revit and RL model. Explain in detail step by step
2e9de83880e745dfc3bcdd0344cba853
{ "intermediate": 0.2753635346889496, "beginner": 0.09609949588775635, "expert": 0.6285369396209717 }
37,190
How to make perfect learning video for business processes? for example OTC billing, OTC Cash app
9983e34db22f7acaf4e14c40a2dd9697
{ "intermediate": 0.274766206741333, "beginner": 0.1657101958990097, "expert": 0.5595236420631409 }
37,191
i have dataset images and labels with class [0,1,2] i want to crop the poylgon class 1 and save it in other folder
992e8f4c5bc28c720e6866a5d5c1a41e
{ "intermediate": 0.41031792759895325, "beginner": 0.29962268471717834, "expert": 0.2900593876838684 }
37,192
Unresolved attribute reference 'send' for class 'WhoIsRequest'
7f2cdba490199765a21cd0db9c379182
{ "intermediate": 0.3492426574230194, "beginner": 0.4629245698451996, "expert": 0.187832772731781 }
37,193
how to setup Named Pipes for communication between Revit and RL model. Explain in detail step by step use revit add in in c# to implement from starting to end
78d6953d6e63d8070436989d39cb4d42
{ "intermediate": 0.3836515545845032, "beginner": 0.1147836372256279, "expert": 0.5015648603439331 }
37,194
sprite = codesters.Sprite("wizard", 0, -100) sprite.say("I need to cast some spells.") stage.set_background("castle") def spell_1(): sprite.say("I will summon a meteor from the sky!") stage.wait(2) sprite.say("Abra cadabra!") sprite.turn_left(360) fireball = codesters.Sprite("meteor1", -300, 0) fireball.glide_to(350, 0) def spell_2(): sprite.say("Now I shall make treasure appear out of nowhere!") stage.wait(2) sprite.say("Alakazam!") sprite.turn_left(45) sprite.turn_right(90) sprite.turn_left(45) treasure = codesters.Sprite("treasurechest", -125, -200) def spell_3(): sprite.say("Now I will summon my pet dragon!") stage.wait(2) sprite.say("Hocus Pocus!") sprite.flip_right_left() stage.wait(1) sprite.flip_right_left() pet_dragon = codesters.Sprite("dragon", 175, -100) # Call the functions and add a wait command between each function call spell_1() stage.wait(2) spell_2() stage.wait(2) spell_3() using stage.wait(2) command between each function call.
7bfb1ff44ab83a463a561dcce0ef2549
{ "intermediate": 0.35122615098953247, "beginner": 0.4117874801158905, "expert": 0.23698635399341583 }
37,195
In provided response of this question, how to setup Named Pipes for communication between Revit and RL model. Explain in detail step by step use revit add in in c# to implement from starting to end getting this error 'The system cannot find the path specified.')
ed0678bfe9f8c9ae082a407c7a86326e
{ "intermediate": 0.4705321490764618, "beginner": 0.09364907443523407, "expert": 0.43581879138946533 }
37,196
sprite = codesters.Sprite("wizard", 0, -100) sprite.say("I need to cast some spells.") stage.set_background("castle") def spell_1(): sprite.say("I will summon a meteor from the sky!") stage.wait(2) sprite.say("Abra cadabra!") sprite.turn_left(360) fireball = codesters.Sprite("meteor1", -300, 0) fireball.glide_to(350, 0) def spell_2(): sprite.say("Now I shall make treasure appear out of nowhere!") stage.wait(2) sprite.say("Alakazam!") sprite.turn_left(45) sprite.turn_right(90) sprite.turn_left(45) treasure = codesters.Sprite("treasurechest", -125, -200) def spell_3(): sprite.say("Now I will summon my pet dragon!") stage.wait(2) sprite.say("Hocus Pocus!") sprite.flip_right_left() stage.wait(1) sprite.flip_right_left() pet_dragon = codesters.Sprite("dragon", 175, -100) # Call the functions and add a wait command between each function call spell_1() stage.wait(2) spell_2() stage.wait(2) spell_3() add a stage.wait(2) command to each function call
4d2ced1a00189ca8fe382a27eae420cc
{ "intermediate": 0.37183189392089844, "beginner": 0.3834123909473419, "expert": 0.24475571513175964 }
37,197
stage.set_background("jupiter") sprite = codesters.Sprite("ufo", 125, 100) sprite.set_size(.4) x = -215 size = 35 light = codesters.Star(x, 100, 5, size, "gold") x += 50 size -= 5 light = codesters.Star(x, 100, 5, size, "gold") x += 50 size -= 5 light = codesters.Star(x, 100, 5, size, "gold") x += 50 size -= 5 light = codesters.Star(x, 100, 5, size, "gold") x += 50 size -= 5 light = codesters.Star(x, 100, 5, size, "gold") x += 50 size -= 5 light = codesters.Star(x, 100, 5, size, "gold") x += 50 size -= 5 Refactor the code so that it uses a Loop with Range (for loop) to draw the six stars. The value of the variables x and size should change inside the loop. Think: How do the values of x and size change in the program? (Hint: check the lines after creating a star) Remember, your code should draw the exact same stars. You should delete or comment out the original code.
3a148b5f0c7ccf36734e29ca775a7119
{ "intermediate": 0.33103248476982117, "beginner": 0.486672580242157, "expert": 0.18229494988918304 }
37,198
write complete code in detail for this to test its workflow: using System; using System.IO.Pipes; using Autodesk.Revit.UI; class RevitAddIn { private NamedPipeServerStream pipeServer; private const string pipeName = “RevitToRLPipe”; public Result StartServer(ExternalCommandData commandData, ref string message, ElementSet elements) { // Start the named pipe server in a new thread or as a task to avoid blocking the Revit UI. Task.Run(() => { pipeServer = new NamedPipeServerStream(pipeName, PipeDirection.InOut, 1, PipeTransmissionMode.Byte, PipeOptions.Asynchronous); // Wait for a client to connect pipeServer.WaitForConnection(); // Once the client is connected, you can use StreamReader/StreamWriter or BinaryReader/BinaryWriter // to read/write data from/to the pipe stream. // … Your pipe handling code here }); // The above code starts a server and waits for a client connection in a separate task. // Ensure exception handling and proper disposal of pipeServer when done. return Result.Succeeded; } // A method to stop the server and release resources once the add-in is closed. public void StopServer() { if(pipeServer != null) { if(pipeServer.IsConnected) pipeServer.Disconnect(); pipeServer.Dispose(); } } } Step 2: Set Up Your RL Model as a Pipe Client 1. In your RL model code, which could be written in Python or another language, you need to establish it as a pipe client that connects to the named pipe server you’ve created in your Revit add-in. Here’s an example in Python using the pywin32 library to connect to the named pipe server created above: import win32pipe import win32file def connect_to_pipe(): pipe_name = r’\.\pipe\RevitToRLPipe’ handle = win32file.CreateFile( pipe_name, win32file.GENERIC_READ | win32file.GENERIC_WRITE, 0, None, win32file.OPEN_EXISTING, 0, None ) # Now you can read/write to the pipe # … RL model communication logic goes here # Remember to close the handle win32file.CloseHandle(handle) # Execute the connection function connect_to_pipe()
44117eccb5970bb9061d593fe887e2f5
{ "intermediate": 0.33527901768684387, "beginner": 0.35212188959121704, "expert": 0.3125991225242615 }
37,199
import pygame import sys pygame.init() # Set up display screen = pygame.display.set_mode((900, 700)) # Set the Kirby pink color (RGB format) kirby_pink_color = (255, 105, 180) ground_color = (0, 255, 0) # Green color for the ground # Circle properties circle_radius = 25 # Initial radius circle_y_offset = 25 # Offset from the top of the circle # Circle position and velocity circle_x, circle_y = 425, 500 circle_x_speed, circle_y_speed = 0, 0 gravity = 1 jump_height = -15 # Set jump height # Variables for crouching and in-air state is_crouching = False crouch_scale = 0.5 # Scale factor for crouching in_air = False # Movement flags is_moving_left = False is_moving_right = False is_floating = False # Game loop while True: for event in pygame.event.get(): if event.type == pygame.QUIT: pygame.quit() sys.exit() elif event.type == pygame.KEYDOWN: if event.key == pygame.K_s and not is_crouching: is_crouching = True elif event.key == pygame.K_w and not in_air: circle_y_speed = jump_height # Set jump speed in_air = True # Circle is in the air after jumping elif event.key == pygame.K_e and not is_crouching: is_floating = True elif event.key == pygame.K_r: is_floating = False elif event.key == pygame.K_a: is_moving_left = True elif event.key == pygame.K_d: is_moving_right = True elif event.type == pygame.KEYUP: if event.key == pygame.K_s: is_crouching = False elif event.key == pygame.K_a: is_moving_left = False elif event.key == pygame.K_d: is_moving_right = False #Floating set up if is_floating == True: gravity = 0.3 jump_height = -6.5 in_air = False is_crouching = False elif is_floating == False: gravity = 1 jump_height = -15 # Apply gravity circle_y_speed += gravity # Apply horizontal motion if is_moving_left: circle_x_speed = -5 elif is_moving_right: circle_x_speed = 5 else: circle_x_speed = 0 # Update circle position circle_x += circle_x_speed circle_y += circle_y_speed # Collision with the ground if circle_y + circle_radius >= 575: circle_y = 575 - circle_radius 
circle_y_speed = 0 gravity = 1 jump_height = -15 is_floating = False in_air = False # Circle is on the ground # Collision with the sides of the screen if circle_x < 0: circle_x = 0 elif circle_x > 900 - 2 * circle_radius: circle_x = 900 - 2 * circle_radius # Draw background screen.fill((0, 0, 255)) # Blue background # Draw ground pygame.draw.rect(screen, ground_color, (0, 600, 900, 50)) # Draw Kirby pink sphere (circle) if is_crouching: pygame.draw.ellipse(screen, kirby_pink_color, (int(circle_x), int(circle_y + circle_radius), int(2 * circle_radius), int(crouch_scale * 2 * circle_radius))) else: pygame.draw.circle(screen, kirby_pink_color, (int(circle_x + circle_radius), int(circle_y + circle_radius)), circle_radius) # Update the display pygame.display.flip() # Cap the frame rate pygame.time.Clock().tick(60)
82f0705a70fb0ea28b6e72a8b42ea24c
{ "intermediate": 0.3008674681186676, "beginner": 0.47979027032852173, "expert": 0.2193422168493271 }
37,200
import pygame import sys pygame.init() # Set up display screen = pygame.display.set_mode((900, 700)) # Set the Kirby pink color (RGB format) kirby_pink_color = (255, 105, 180) ground_color = (0, 255, 0) # Green color for the ground # Circle properties circle_radius = 25 # Initial radius circle_y_offset = 25 # Offset from the top of the circle # Circle position and velocity circle_x, circle_y = 425, 500 circle_x_speed, circle_y_speed = 0, 0 gravity = 1 jump_height = -15 # Set jump height # Variables for crouching and in-air state is_crouching = False crouch_scale = 0.5 # Scale factor for crouching in_air = False # Movement flags is_moving_left = False is_moving_right = False is_floating = False # Game loop while True: for event in pygame.event.get(): if event.type == pygame.QUIT: pygame.quit() sys.exit() elif event.type == pygame.KEYDOWN: if event.key == pygame.K_s and not is_crouching: is_crouching = True elif event.key == pygame.K_w and not in_air: circle_y_speed = jump_height # Set jump speed in_air = True # Circle is in the air after jumping elif event.key == pygame.K_e and not is_crouching: is_floating = True elif event.key == pygame.K_r: is_floating = False elif event.key == pygame.K_a: is_moving_left = True elif event.key == pygame.K_d: is_moving_right = True elif event.type == pygame.KEYUP: if event.key == pygame.K_s: is_crouching = False elif event.key == pygame.K_a: is_moving_left = False elif event.key == pygame.K_d: is_moving_right = False #Floating set up if is_floating == True: gravity = 0.3 jump_height = -6.5 in_air = False is_crouching = False elif is_floating == False: gravity = 1 jump_height = -15 # Apply gravity circle_y_speed += gravity # Apply horizontal motion if is_moving_left: circle_x_speed = -5 elif is_moving_right: circle_x_speed = 5 else: circle_x_speed = 0 # Update circle position circle_x += circle_x_speed circle_y += circle_y_speed # Collision with the ground if circle_y + circle_radius >= 575: circle_y = 575 - circle_radius 
circle_y_speed = 0 gravity = 1 jump_height = -15 is_floating = False in_air = False # Circle is on the ground # Collision with the sides of the screen if circle_x < 0: circle_x = 0 elif circle_x > 900 - 2 * circle_radius: circle_x = 900 - 2 * circle_radius # Draw background screen.fill((0, 0, 255)) # Blue background # Draw ground pygame.draw.rect(screen, ground_color, (0, 600, 900, 50)) # Draw Kirby pink sphere (circle) if is_crouching: pygame.draw.ellipse(screen, kirby_pink_color, (int(circle_x), int(circle_y + circle_radius), int(2 * circle_radius), int(crouch_scale * 2 * circle_radius))) else: pygame.draw.circle(screen, kirby_pink_color, (int(circle_x + circle_radius), int(circle_y + circle_radius)), circle_radius) # Update the display pygame.display.flip() # Cap the frame rate pygame.time.Clock().tick(60)
8505d536567ab0ceedddb69128e92363
{ "intermediate": 0.3008674681186676, "beginner": 0.47979027032852173, "expert": 0.2193422168493271 }
37,201
import pygame import sys pygame.init() # Set up display screen = pygame.display.set_mode((900, 700)) # Set the Kirby pink color (RGB format) kirby_pink_color = (255, 105, 180) ground_color = (0, 255, 0) # Green color for the ground # Circle properties circle_radius = 25 # Initial radius circle_y_offset = 25 # Offset from the top of the circle # Circle position and velocity circle_x, circle_y = 425, 500 circle_x_speed, circle_y_speed = 0, 0 gravity = 1 jump_height = -15 # Set jump height # Variables for crouching and in-air state is_crouching = False crouch_scale = 0.5 # Scale factor for crouching in_air = False # Movement flags is_moving_left = False is_moving_right = False is_floating = False # Flatten effect flatten_duration = 0.3 # Duration of the flattening effect in seconds flatten_timer = 0 # Timer for the flattening effect # Game loop while True: for event in pygame.event.get(): if event.type == pygame.QUIT: pygame.quit() sys.exit() elif event.type == pygame.KEYDOWN: if event.key == pygame.K_s and not is_crouching: is_crouching = True elif event.key == pygame.K_w and not in_air: circle_y_speed = jump_height # Set jump speed in_air = True # Circle is in the air after jumping elif event.key == pygame.K_e and not is_crouching: is_floating = True elif event.key == pygame.K_r: is_floating = False elif event.key == pygame.K_a: is_moving_left = True elif event.key == pygame.K_d: is_moving_right = True elif event.type == pygame.KEYUP: if event.key == pygame.K_s: is_crouching = False elif event.key == pygame.K_a: is_moving_left = False elif event.key == pygame.K_d: is_moving_right = False # Floating set up if is_floating == True: gravity = 0.3 jump_height = -6.5 in_air = False is_crouching = False elif is_floating == False: gravity = 1 jump_height = -15 # Apply gravity circle_y_speed += gravity # Apply horizontal motion if is_moving_left: circle_x_speed = -5 elif is_moving_right: circle_x_speed = 5 else: circle_x_speed = 0 # Update circle position circle_x += 
circle_x_speed circle_y += circle_y_speed # Collision with the ground if circle_y + circle_radius >= 575: circle_y = 575 - circle_radius circle_y_speed = 0 gravity = 1 jump_height = -15 is_floating = False in_air = False # Circle is on the ground if flatten_timer <= 0: flatten_timer = flatten_duration # Update flatten timer if flatten_timer > 0: flatten_timer -= 1 / 60 # Subtract the time elapsed since the last frame # Draw background screen.fill((0, 0, 255)) # Blue background # Draw ground pygame.draw.rect(screen, ground_color, (0, 600, 900, 50)) # Draw Kirby pink sphere (circle) if flatten_timer > 0: # Apply flatten effect flatten_scale = 0.5 # Scale factor for the flatten effect pygame.draw.ellipse(screen, kirby_pink_color, (int(circle_x), int(circle_y + circle_radius), int(2 * circle_radius), int(flatten_scale * 2 * circle_radius))) elif is_crouching: pygame.draw.ellipse(screen, kirby_pink_color, (int(circle_x), int(circle_y + circle_radius), int(2 * circle_radius), int(crouch_scale * 2 * circle_radius))) else: pygame.draw.circle(screen, kirby_pink_color, (int(circle_x + circle_radius), int(circle_y + circle_radius)), circle_radius) # Update the display pygame.display.flip() # Cap the frame rate pygame.time.Clock().tick(60)
91bc12490a647ff2965b3b27a6e8a1ed
{ "intermediate": 0.3313119411468506, "beginner": 0.46378809213638306, "expert": 0.20489999651908875 }
37,202
I want Kirby to flatten when hitting the ground after jumping. He should only flatten for 0.3 seconds and spring back into shape afterwards. import pygame import sys pygame.init() # Set up display screen = pygame.display.set_mode((900, 700)) # Set the Kirby pink color (RGB format) kirby_pink_color = (255, 105, 180) ground_color = (0, 255, 0) # Green color for the ground # Circle properties circle_radius = 25 # Initial radius circle_y_offset = 25 # Offset from the top of the circle # Circle position and velocity circle_x, circle_y = 425, 500 circle_x_speed, circle_y_speed = 0, 0 gravity = 1 jump_height = -15 # Set jump height # Variables for crouching and in-air state is_crouching = False crouch_scale = 0.5 # Scale factor for crouching in_air = False # Movement flags is_moving_left = False is_moving_right = False is_floating = False # Flatten effect flatten_duration = 0.3 # Duration of the flattening effect in seconds flatten_timer = 0 # Timer for the flattening effect # Game loop while True: for event in pygame.event.get(): if event.type == pygame.QUIT: pygame.quit() sys.exit() elif event.type == pygame.KEYDOWN: if event.key == pygame.K_s and not is_crouching: is_crouching = True elif event.key == pygame.K_w and not in_air: circle_y_speed = jump_height # Set jump speed in_air = True # Circle is in the air after jumping elif event.key == pygame.K_e and not is_crouching: is_floating = True elif event.key == pygame.K_r: is_floating = False elif event.key == pygame.K_a: is_moving_left = True elif event.key == pygame.K_d: is_moving_right = True elif event.type == pygame.KEYUP: if event.key == pygame.K_s: is_crouching = False elif event.key == pygame.K_a: is_moving_left = False elif event.key == pygame.K_d: is_moving_right = False # Floating set up if is_floating: gravity = 0.3 jump_height = -6.5 in_air = False is_crouching = False else: gravity = 1 jump_height = -15 # Apply gravity circle_y_speed += gravity # Apply horizontal motion if is_moving_left: circle_x_speed = 
-5 elif is_moving_right: circle_x_speed = 5 else: circle_x_speed = 0 # Update circle position circle_x += circle_x_speed circle_y += circle_y_speed # Collision with the ground if circle_y + circle_radius >= 575: circle_y = 575 - circle_radius circle_y_speed = 0 in_air = False # Circle is on the ground if not is_crouching and circle_y_speed == 0: # Only flatten if not crouching and circle is not moving up (i.e., landed after jumping) flatten_timer = flatten_duration # Update flatten timer if flatten_timer > 0: flatten_timer -= 1 / 60 # Subtract the time elapsed since the last frame # Draw background screen.fill((0, 0, 255)) # Blue background # Draw ground pygame.draw.rect(screen, ground_color, (0, 600, 900, 50)) # Draw Kirby pink sphere (circle) if flatten_timer > 0 and not is_crouching: # Apply flatten effect flatten_scale = 0.5 # Scale factor for the flatten effect pygame.draw.ellipse(screen, kirby_pink_color, (int(circle_x), int(circle_y + circle_radius), int(2 * circle_radius), int(flatten_scale * 2 * circle_radius))) elif is_crouching: pygame.draw.ellipse(screen, kirby_pink_color, (int(circle_x), int(circle_y + circle_radius), int(2 * circle_radius), int(crouch_scale * 2 * circle_radius))) else: pygame.draw.circle(screen, kirby_pink_color, (int(circle_x + circle_radius), int(circle_y + circle_radius)), circle_radius) # Update the display pygame.display.flip() # Cap the frame rate pygame.time.Clock().tick(60)
b2b78cb7b065660375770ef08b82793f
{ "intermediate": 0.3321513533592224, "beginner": 0.45855510234832764, "expert": 0.20929352939128876 }
37,203
const actions = () => { return [ { text: t('btns.edit'), func: edit, icon: 'edit' }, { text: t('btns.replace'), func: replaceProject, icon: 'replace-project' }, { text: t('Действия с агентом'), func: agentActions, icon: 'text' } ] } const agentActions = () => { return [ { text: t('unit.installAgent'), func: confirmInstallAgent, icon: 'install' }, { text: t('unit.downloadAgent'), func: downloadAgent, icon: 'download' } ] } как написать на vue 3 чтобы при нажатии на agentActions появляется список действий из массива agentActions
48934d460e21a7080a5fb3d5622196f1
{ "intermediate": 0.3447195291519165, "beginner": 0.3341227173805237, "expert": 0.32115769386291504 }
37,204
write move wall method inside This c# when it reads client input : Revit Add-in C# Code: using System; using System.IO; using System.IO.Pipes; using System.Threading.Tasks; using Autodesk.Revit.UI; using Autodesk.Revit.DB; public class RevitAddIn : IExternalCommand { private static NamedPipeServerStream pipeServer; private const string pipeName = “RevitToRLPipe”; public Result Execute(ExternalCommandData commandData, ref string message, ElementSet elements) { // Start the named pipe server in a new thread or as a task to avoid blocking the Revit UI. Task.Run(() => { try { using (pipeServer = new NamedPipeServerStream(pipeName, PipeDirection.InOut, 1, PipeTransmissionMode.Byte, PipeOptions.Asynchronous)) { // Wait for a client to connect pipeServer.WaitForConnection(); try { using (StreamReader reader = new StreamReader(pipeServer)) using (StreamWriter writer = new StreamWriter(pipeServer)) { // You can use StreamReader/StreamWriter or BinaryReader/BinaryWriter to read/write data from/to the pipe stream. 
// Example: Echo what the client sent string clientInput; while ((clientInput = reader.ReadLine()) != null) { writer.WriteLine(clientInput); writer.Flush(); } } } catch (IOException e) { // Handle error here TaskDialog.Show(“Pipe Error”, e.Message); } } } catch (Exception ex) { // Handle error here TaskDialog.Show(“Pipe Error”, ex.Message); } }); return Result.Succeeded; } } Python Client Code: import win32pipe import win32file import pywintypes def connect_to_pipe(): pipe_name = r’\.\pipe\RevitToRLPipe’ while True: try: handle = win32file.CreateFile( pipe_name, win32file.GENERIC_READ | win32file.GENERIC_WRITE, 0, None, win32file.OPEN_EXISTING, 0, None ) break except pywintypes.error as e: if e.args[0] == 2: print(“Pipe is not available, trying again…”) elif e.args[0] == 109: print(“Pipe closed, exiting.”) return # Now you can read/write to/from the pipe try: # Example - Write a message to the server message = “Hello from the RL model!\n” win32file.WriteFile(handle, message.encode()) # Example - Read the server’s response resp = win32file.ReadFile(handle, 64*1024) print(f"Server replied: {resp[1].decode()}") finally: # Remember to close the handle win32file.CloseHandle(handle) # Execute the connection function connect_to_pipe() move method is this: [Transaction(TransactionMode.Manual)] [Regeneration(RegenerationOption.Manual)] public class WallMover : IExternalCommand { public Result Execute( ExternalCommandData commandData, ref string message, ElementSet elements) { UIApplication uiApp = commandData.Application; Document doc = uiApp.ActiveUIDocument.Document; // In a real application, you would get the wall by selection or some other method. // For the example purpose, we are fetching the first wall we find. 
//ElementId wallId = 1597039; // Replace with your known ElementId //Wall wall = doc.GetElement(wallId) as Wall; int elementIdToFind = 1597039; ElementId elementId = new ElementId(Convert.ToInt64(elementIdToFind)); Wall wall = doc.GetElement(elementId) as Wall; //Wall wall = new FilteredElementCollector(doc).OfCategory(BuiltInCategory.OST_Walls).WhereElementIsNotElementType().Cast<Wall>().FirstOrDefault(); if (wall == null) { message = "No wall found."; return Result.Failed; } bool result = MoveUsingLocationCurve(doc, wall); if (result) { return Result.Succeeded; } else { message = "Wall could not be moved."; return Result.Failed; } } private bool MoveUsingLocationCurve(Document doc, Wall wall) { // Check if the wall has a valid location curve LocationCurve wallLocationCurve = wall.Location as LocationCurve; if (wallLocationCurve == null) { return false; } XYZ translationVec = new XYZ(10, 20, 0); // Start a new transaction for modifying the document using (Transaction trans = new Transaction(doc, "Move Wall")) { trans.Start(); try { // Perform the move operation wallLocationCurve.Move(translationVec); trans.Commit(); return true; } catch { // Abort the transaction in case of any errors trans.RollBack(); return false; } } } }
995100ad9eb85d473c3903a3e5d91c1f
{ "intermediate": 0.3816293179988861, "beginner": 0.3555568754673004, "expert": 0.2628138065338135 }
37,205
How to check if an array is numpy array in python?
754cbb286c6408ba6c3dd1b7f3ff576e
{ "intermediate": 0.5793375968933105, "beginner": 0.11645728349685669, "expert": 0.30420517921447754 }
37,206
write move wall method inside This c# when it reads client input : Revit Add-in C# Code: using System; using System.IO; using System.IO.Pipes; using System.Threading.Tasks; using Autodesk.Revit.UI; using Autodesk.Revit.DB; public class RevitAddIn : IExternalCommand { private static NamedPipeServerStream pipeServer; private const string pipeName = “RevitToRLPipe”; public Result Execute(ExternalCommandData commandData, ref string message, ElementSet elements) { // Start the named pipe server in a new thread or as a task to avoid blocking the Revit UI. Task.Run(() => { try { using (pipeServer = new NamedPipeServerStream(pipeName, PipeDirection.InOut, 1, PipeTransmissionMode.Byte, PipeOptions.Asynchronous)) { // Wait for a client to connect pipeServer.WaitForConnection(); try { using (StreamReader reader = new StreamReader(pipeServer)) using (StreamWriter writer = new StreamWriter(pipeServer)) { // You can use StreamReader/StreamWriter or BinaryReader/BinaryWriter to read/write data from/to the pipe stream. 
// Example: Echo what the client sent string clientInput; while ((clientInput = reader.ReadLine()) != null) { writer.WriteLine(clientInput); writer.Flush(); } } } catch (IOException e) { // Handle error here TaskDialog.Show(“Pipe Error”, e.Message); } } } catch (Exception ex) { // Handle error here TaskDialog.Show(“Pipe Error”, ex.Message); } }); return Result.Succeeded; } } Python Client Code: import win32pipe import win32file import pywintypes def connect_to_pipe(): pipe_name = r’\.\pipe\RevitToRLPipe’ while True: try: handle = win32file.CreateFile( pipe_name, win32file.GENERIC_READ | win32file.GENERIC_WRITE, 0, None, win32file.OPEN_EXISTING, 0, None ) break except pywintypes.error as e: if e.args[0] == 2: print(“Pipe is not available, trying again…”) elif e.args[0] == 109: print(“Pipe closed, exiting.”) return # Now you can read/write to/from the pipe try: # Example - Write a message to the server message = “Hello from the RL model!\n” win32file.WriteFile(handle, message.encode()) # Example - Read the server’s response resp = win32file.ReadFile(handle, 64*1024) print(f"Server replied: {resp[1].decode()}") finally: # Remember to close the handle win32file.CloseHandle(handle) # Execute the connection function connect_to_pipe() move method is this: [Transaction(TransactionMode.Manual)] [Regeneration(RegenerationOption.Manual)] public class WallMover : IExternalCommand { public Result Execute( ExternalCommandData commandData, ref string message, ElementSet elements) { UIApplication uiApp = commandData.Application; Document doc = uiApp.ActiveUIDocument.Document; // In a real application, you would get the wall by selection or some other method. // For the example purpose, we are fetching the first wall we find. 
//ElementId wallId = 1597039; // Replace with your known ElementId //Wall wall = doc.GetElement(wallId) as Wall; int elementIdToFind = 1597039; ElementId elementId = new ElementId(Convert.ToInt64(elementIdToFind)); Wall wall = doc.GetElement(elementId) as Wall; //Wall wall = new FilteredElementCollector(doc).OfCategory(BuiltInCategory.OST_Walls).WhereElementIsNotElementType().Cast<Wall>().FirstOrDefault(); if (wall == null) { message = "No wall found."; return Result.Failed; } bool result = MoveUsingLocationCurve(doc, wall); if (result) { return Result.Succeeded; } else { message = "Wall could not be moved."; return Result.Failed; } } private bool MoveUsingLocationCurve(Document doc, Wall wall) { // Check if the wall has a valid location curve LocationCurve wallLocationCurve = wall.Location as LocationCurve; if (wallLocationCurve == null) { return false; } XYZ translationVec = new XYZ(10, 20, 0); // Start a new transaction for modifying the document using (Transaction trans = new Transaction(doc, "Move Wall")) { trans.Start(); try { // Perform the move operation wallLocationCurve.Move(translationVec); trans.Commit(); return true; } catch { // Abort the transaction in case of any errors trans.RollBack(); return false; } } } } write complete code to better understand
925047915a9364615e7ab735e742a751
{ "intermediate": 0.3816293179988861, "beginner": 0.3555568754673004, "expert": 0.2628138065338135 }
37,207
I have a webhook that is failing and the error message inside the log file is such: Newtonsoft.Json.JsonReaderException: After parsing a value an unexpected character was encountered: S. Path 'paymentMetaJson', line 1, position 218. at bool Newtonsoft.Json.JsonTextReader.ParsePostValue(bool ignoreComments) at bool Newtonsoft.Json.JsonTextReader.Read() at object Newtonsoft.Json.Serialization.JsonSerializerInternalReader.PopulateDictionary(IDictionary dictionary, JsonReader reader, JsonDictionaryContract contract, JsonProperty containerProperty, string id) at object Newtonsoft.Json.Serialization.JsonSerializerInternalReader.CreateObject(JsonReader reader, Type objectType, JsonContract contract, JsonProperty member, JsonContainerContract containerContract, JsonProperty containerMember, object existingValue) at object Newtonsoft.Json.Serialization.JsonSerializerInternalReader.CreateValueInternal(JsonReader reader, Type objectType, JsonContract contract, JsonProperty member, JsonContainerContract containerContract, JsonProperty containerMember, object existingValue) at object Newtonsoft.Json.Serialization.JsonSerializerInternalReader.Deserialize(JsonReader reader, Type objectType, bool checkAdditionalContent) at object Newtonsoft.Json.JsonSerializer.DeserializeInternal(JsonReader reader, Type objectType) at object Newtonsoft.Json.JsonConvert.DeserializeObject(string value, Type type, JsonSerializerSettings settings) at Dictionary<string, object> PayStar.Core.Features.Jobs.Tasks.InitiateFunctionOrchestration.MapConfigurationValues(Dictionary<string, object> item, IJobExecution execution) in C:/agent/_work/1/s/server/src/PayStar/PayStar.Core/Features/Jobs/Tasks/InitiateFunctionOrchestration.cs:line 132 at string PayStar.Core.Features.Jobs.Tasks.InitiateFunctionOrchestration.GetConfiguredPayload(FunctionOrchestration orchestration, IJobExecution execution) in C:/agent/_work/1/s/server/src/PayStar/PayStar.Core/Features/Jobs/Tasks/InitiateFunctionOrchestration.cs:line 116 at 
ProcessWebhookDto PayStar.Core.Features.Jobs.Tasks.InitiateFunctionOrchestration.GetFunctionRequest(FunctionTriggerDetails triggerDetails, IJobExecution execution) in C:/agent/_work/1/s/server/src/PayStar/PayStar.Core/Features/Jobs/Tasks/InitiateFunctionOrchestration.cs:line 87 at async Task<(ProcessWebhookDto functionRequest, FunctionTriggerDetails details)> PayStar.Core.Features.Jobs.Tasks.InitiateFunctionOrchestration.ConfigureFunctionRequestAsync(IJobExecution execution) in C:/agent/_work/1/s/server/src/PayStar/PayStar.Core/Features/Jobs/Tasks/InitiateFunctionOrchestration.cs:line 77 at async Task PayStar.Core.Features.Jobs.Tasks.InitiateFunctionOrchestration.Process(IJobExecution execution) in C:/agent/_work/1/s/server/src/PayStar/PayStar.Core/Features/Jobs/Tasks/InitiateFunctionOrchestration.cs:line 56 at async Task PayStar.Core.Features.Jobs.Execution.JobExecutionTask<T>.Run(IJobExecution execution) in C:/agent/_work/1/s/server/src/PayStar/PayStar.Core/Features/Jobs/Execution/JobExecutionTask.cs:line 61 So I went and pulled the payload to see the PaymentMetaJson field to see if I could locate the issue. 
Here is the paymentMetaJson: { "AccountNumber": "3617843617841", "CustomerNumber": "3617841", "ValidAccount": true, "CustAcctNumber": "3617843617841", "CustomerName": "RALLY\"S HAMBURGER", "PhoneNumber": "8132837275", "ServiceAddress": "1875 BARATARIA BLVD", "AccountStatus": "A", "DueDate": "2024-01-16T06:00:00", "CurrentBalance": "11.53", "CurrentArrears": ".00", "ShutoffFlag": false, "AllowCredit": true, "AllowACH": true, "AllowExtension": true, "Paperless": "true", "AllowPaymentPlan": false, "PPDownPayment": ".00", "PPPaymentAmount": ".00", "PPPaymentNumber": "", "AllowDueDateExtension": "Y", "PastDueAmountMinor": 0, "FormattedArrears": "$0.00", "ShutoffForNonPayment": "N", "AllowPaymentPlanMapped": "N", "Id": 1606619, "AutopaySettingsId": null, "IsArchived": false, "AccountSyncDate": "2024-01-10T13:10:07.1722273", "BlockedByAllPaymentTypes": false, "AccountLocked": false, "BusinessUnitId": 339, "SubAccountNumber": "", "Balance": 1153, "LateFee": 0, "Name": "RALLY\"S HAMBURGER", "Description": null, "EmailAddress": "AirMR50280@engieinsight.com", "Address": { "StreetAddress": "1875 BARATARIA BLVD", "StreetAddress2": null, "City": null, "State": null, "ZipCode": null }, "IssueDate": null, "Note": null, "Meta": "{\"AccountNumber\":\"361784\",\"CustomerNumber\":\"3617841\",\"ValidAccount\":true,\"CustAcctNumber\":\"3617843617841\",\"CustomerName\":\"RALLY\\\"S HAMBURGER\",\"PhoneNumber\":\"8132837275\",\"ServiceAddress\":\"1875 BARATARIA BLVD\",\"AccountStatus\":\"A\",\"DueDate\":\"2024-01-16\",\"CurrentBalance\":\"11.53\",\"CurrentArrears\":\".00\",\"ShutoffFlag\":false,\"AllowCredit\":true,\"AllowACH\":true,\"AllowExtension\":true,\"Paperless\":\"true\",\"AllowPaymentPlan\":false,\"PPDownPayment\":\".00\",\"PPPaymentAmount\":\".00\",\"PPPaymentNumber\":\"\",\"AllowDueDateExtension\":\"Y\",\"PastDueAmountMinor\":0,\"FormattedArrears\":\"$0.00\",\"ShutoffForNonPayment\":\"N\",\"AllowPaymentPlanMapped\":\"N\"}", "CreateMethod": "Api", "AllowOnlinePayment": true, 
"BlockCreditCardPayment": false, "BlockAchPayment": false, "PaperlessBilling": true, "PaymentDescriptor": "3617843617841", "__LineItems": [ { "PaymentSession": null, "Account": null, "Product": null, "Id": 0, "PaymentSessionId": 0, "LineItemType": "AccountBalance", "Description": "Account Balance", "Price": { "Value": 1153, "Code": "USD" }, "AccountId": 1606619, "ProductId": null } ] }
e88ee28adc32bb89503eaec688cc989c
{ "intermediate": 0.3893991708755493, "beginner": 0.3911087214946747, "expert": 0.2194921374320984 }
37,208
send body in post http request angular
fae8d05d994a794be3b2aae67098bfdd
{ "intermediate": 0.36656808853149414, "beginner": 0.28261250257492065, "expert": 0.3508194088935852 }
37,209
I have a flex container. I want it max-width to be equal 218px. However, inside this container there is a div with a width of 175px. I want flex container to take the width of the div inside automatically, however it shows that the width of the flex cotainer is 218px. How do I fix it?
fc3f58aa4a6558dce4d63d733e7ea0f3
{ "intermediate": 0.4449203908443451, "beginner": 0.2788829207420349, "expert": 0.27619668841362 }
37,210
js that does both of these 2 things 1: document.title = 'Joseph Love - Midterm Study Guide 2024 '; and 2: (function() { var link = document.querySelector("link[rel*='icon']") || document.createElement('link'); link.type = 'image/x-icon'; link.rel = 'shortcut icon'; link.href = 'https://www.gstatic.com/images/branding/product/2x/docs_2020q4_32dp.png'; document.getElementsByTagName('head')[0].appendChild(link); })();
c34c1db23fb6b75ca6ef0d9ed04802e5
{ "intermediate": 0.40737298130989075, "beginner": 0.26549872756004333, "expert": 0.3271283507347107 }
37,211
i have image that is labels with polygon data classes 0,1,2 and classe 0 is in the class 2, i want script that when i crop the class 2 i save the the labels 0 with it
25c6448962caf8a367db0cc49b4cf09b
{ "intermediate": 0.4475160241127014, "beginner": 0.27845096588134766, "expert": 0.2740330398082733 }
37,212
import pygame import sys pygame.init() # Set up display screen = pygame.display.set_mode((900, 700)) # Set the Kirby pink color (RGB format) kirby_pink_color = (255, 105, 180) ground_color = (0, 255, 0) # Green color for the ground # Circle properties circle_radius = 25 # Initial radius circle_y_offset = 25 # Offset from the top of the circle circle_radius_float = 35 circle_y_offset_float = 35 # Circle position and velocity circle_x, circle_y = 425, 500 circle_x_speed, circle_y_speed = 0, 0 gravity = 1 jump_height = -15 # Set jump height # Variables for crouching and in-air state is_crouching = False crouch_scale = 0.5 # Scale factor for crouching in_air = False # Movement flags is_moving_left = False is_moving_right = False is_floating = False # Game loop while True: for event in pygame.event.get(): if event.type == pygame.QUIT: pygame.quit() sys.exit() elif event.type == pygame.KEYDOWN: if event.key == pygame.K_s and not is_crouching: is_crouching = True elif event.key == pygame.K_w and not in_air: circle_y_speed = jump_height # Set jump speed in_air = True # Circle is in the air after jumping elif event.key == pygame.K_e and not is_crouching: is_floating = True elif event.key == pygame.K_r: is_floating = False elif event.key == pygame.K_a: is_moving_left = True elif event.key == pygame.K_d: is_moving_right = True elif event.type == pygame.KEYUP: if event.key == pygame.K_s: is_crouching = False elif event.key == pygame.K_a: is_moving_left = False elif event.key == pygame.K_d: is_moving_right = False #Floating set up if is_floating == True: gravity = 0.3 jump_height = -6.5 in_air = False is_crouching = False circle_radius = circle_radius_float circle_offset = circle_y_offset_float elif is_floating == False: gravity = 1 jump_height = -15 circle_radius = circle_radius circle_y_offset = circle_y_offset # Apply gravity circle_y_speed += gravity # Apply horizontal motion if is_moving_left: circle_x_speed = -5 elif is_moving_right: circle_x_speed = 5 else: 
circle_x_speed = 0 # Update circle position circle_x += circle_x_speed circle_y += circle_y_speed # Collision with the ground if circle_y + circle_radius >= 575: circle_y = 575 - circle_radius circle_y_speed = 0 gravity = 1 jump_height = -15 is_floating = False in_air = False # Circle is on the ground circle_radius = circle_radius circle_y_offset = circle_y_offset # Collision with the sides of the screen if circle_x < 0: circle_x = 0 elif circle_x > 900 - 2 * circle_radius: circle_x = 900 - 2 * circle_radius # Draw background screen.fill((100, 100, 255)) # Blue background # Draw ground pygame.draw.rect(screen, ground_color, (0, 600, 900, 50)) # Draw Kirby pink sphere (circle) if is_crouching: pygame.draw.ellipse(screen, kirby_pink_color, (int(circle_x), int(circle_y + circle_radius), int(2 * circle_radius), int(crouch_scale * 2 * circle_radius))) else: pygame.draw.circle(screen, kirby_pink_color, (int(circle_x + circle_radius), int(circle_y + circle_radius)), circle_radius) # Update the display pygame.display.flip() # Cap the frame rate pygame.time.Clock().tick(60)
68007e9a4b7c205d9cce61348df2e44c
{ "intermediate": 0.3326834440231323, "beginner": 0.4451778829097748, "expert": 0.2221386879682541 }
37,213
SSL/TLS: Report Weak Cipher Suites TLS1_RSA_DES_192_CBC3_SHA Remove week cipeer in IIS
b944482070ab88708a071df45034c22d
{ "intermediate": 0.39974090456962585, "beginner": 0.2574446201324463, "expert": 0.34281447529792786 }
37,214
import pygame import sys pygame.init() # Set up display screen = pygame.display.set_mode((900, 700)) # Set the Kirby pink color (RGB format) kirby_pink_color = (255, 105, 180) ground_color = (0, 255, 0) # Green color for the ground # Circle properties circle_radius = 25 # Initial radius circle_y_offset = 25 # Offset from the top of the circle circle_radius_float = 35 circle_y_offset_float = 35 # Circle position and velocity circle_x, circle_y = 425, 500 circle_x_speed, circle_y_speed = 0, 0 gravity = 1 jump_height = -15 # Set jump height # Variables for crouching and in-air state is_crouching = False crouch_scale = 0.5 # Scale factor for crouching in_air = False # Movement flags is_moving_left = False is_moving_right = False is_floating = False # Game loop while True: for event in pygame.event.get(): if event.type == pygame.QUIT: pygame.quit() sys.exit() elif event.type == pygame.KEYDOWN: if event.key == pygame.K_s and not is_crouching: is_crouching = True elif event.key == pygame.K_w and not in_air: circle_y_speed = jump_height # Set jump speed in_air = True # Circle is in the air after jumping elif event.key == pygame.K_e and not is_crouching: is_floating = True elif event.key == pygame.K_r: is_floating = False elif event.key == pygame.K_a: is_moving_left = True elif event.key == pygame.K_d: is_moving_right = True elif event.type == pygame.KEYUP: if event.key == pygame.K_s: is_crouching = False elif event.key == pygame.K_a: is_moving_left = False elif event.key == pygame.K_d: is_moving_right = False #Floating set up if is_floating == True: gravity = 0.3 jump_height = -6.5 in_air = False is_crouching = False circle_radius = circle_radius_float circle_offset = circle_y_offset_float elif is_floating == False: gravity = 1 jump_height = -15 circle_radius = circle_radius circle_y_offset = circle_y_offset # Apply gravity circle_y_speed += gravity # Apply horizontal motion if is_moving_left: circle_x_speed = -5 elif is_moving_right: circle_x_speed = 5 else: 
circle_x_speed = 0 # Update circle position circle_x += circle_x_speed circle_y += circle_y_speed # Collision with the ground if circle_y + circle_radius >= 575: circle_y = 575 - circle_radius circle_y_speed = 0 gravity = 1 jump_height = -15 is_floating = False in_air = False # Circle is on the ground circle_radius = circle_radius circle_y_offset = circle_y_offset # Collision with the sides of the screen if circle_x < 0: circle_x = 0 elif circle_x > 900 - 2 * circle_radius: circle_x = 900 - 2 * circle_radius # Draw background screen.fill((100, 100, 255)) # Blue background # Draw ground pygame.draw.rect(screen, ground_color, (0, 600, 900, 50)) # Draw Kirby pink sphere (circle) if is_crouching: pygame.draw.ellipse(screen, kirby_pink_color, (int(circle_x), int(circle_y + circle_radius), int(2 * circle_radius), int(crouch_scale * 2 * circle_radius))) else: pygame.draw.circle(screen, kirby_pink_color, (int(circle_x + circle_radius), int(circle_y + circle_radius)), circle_radius) # Update the display pygame.display.flip() # Cap the frame rate pygame.time.Clock().tick(60) A puff of air should spawn where kirby is facing if he is floating and presses r.
3d7cae83f749daa2803f968c1c6e5497
{ "intermediate": 0.3326834440231323, "beginner": 0.4451778829097748, "expert": 0.2221386879682541 }
37,215
i have dataset that has classes 0,1,2 the class 0 is in the class 2, so when i crop the class 2 i want also to save the polygon part of class 0 but i have to adjust for the the crop one "def crop_polygon(image_path, class_id, polygon_coordinates, original_image_path, save_path): with Image.open(original_image_path) as img: img_width, img_height = img.size # Convert normalized coordinates to image coordinates xy_coordinates = [(int(polygon_coordinates[i] * img_width), int(polygon_coordinates[i + 1] * img_height)) for i in range(0, len(polygon_coordinates), 2)] # Calculate bounding box bounding_box = [min(coord[0] for coord in xy_coordinates), min(coord[1] for coord in xy_coordinates), max(coord[0] for coord in xy_coordinates), max(coord[1] for coord in xy_coordinates)] # Calculate the size of the cropped area crop_width = bounding_box[2] - bounding_box[0] crop_height = bounding_box[3] - bounding_box[1] # Create a new image with a transparent background and the size of the cropped area new_img = Image.new('RGBA', (crop_width, crop_height), (255, 255, 255, 0)) # Transparent background # Create an alpha mask using the polygon ImageDraw.Draw(new_img).polygon([(coord[0] - bounding_box[0], coord[1] - bounding_box[1]) for coord in xy_coordinates], outline=(0, 0, 0, 255), fill=(255, 255, 255, 255)) # Paste the cropped polygon onto the new image using the alpha mask new_img.paste(img.crop((bounding_box[0], bounding_box[1], bounding_box[2], bounding_box[3])), (0, 0), mask=new_img) # Save the cropped image with a transparent background and bounding box new_img.save(save_path)
58148fd5f0527aeea71204833c098eb6
{ "intermediate": 0.45047563314437866, "beginner": 0.2702513039112091, "expert": 0.27927303314208984 }
37,216
Indent the code properly please import pygame import sys pygame.init() # Set up display screen = pygame.display.set_mode((900, 700)) # Set the Kirby pink color (RGB format) kirby_pink_color = (255, 105, 180) ground_color = (0, 255, 0) # Green color for the ground # Circle properties circle_radius = 25 # Initial radius circle_y_offset = 25 # Offset from the top of the circle circle_radius_float = 35 circle_y_offset_float = 35 # Circle position and velocity circle_x, circle_y = 425, 500 circle_x_speed, circle_y_speed = 0, 0 gravity = 1 jump_height = -15 # Set jump height # Variables for crouching and in-air state is_crouching = False crouch_scale = 0.5 # Scale factor for crouching in_air = False # Movement flags is_moving_left = False is_moving_right = False is_floating = False # Game loop while True: for event in pygame.event.get(): if event.type == pygame.QUIT: pygame.quit() sys.exit() elif event.type == pygame.KEYDOWN: if event.key == pygame.K_s and not is_crouching: is_crouching = True elif event.key == pygame.K_w and not in_air: circle_y_speed = jump_height # Set jump speed in_air = True # Circle is in the air after jumping elif event.key == pygame.K_e and not is_crouching: is_floating = True elif event.key == pygame.K_r: is_floating = False elif event.key == pygame.K_a: is_moving_left = True elif event.key == pygame.K_d: is_moving_right = True elif event.type == pygame.KEYUP: if event.key == pygame.K_s: is_crouching = False elif event.key == pygame.K_a: is_moving_left = False elif event.key == pygame.K_d: is_moving_right = False #Floating set up if is_floating == True: gravity = 0.3 jump_height = -6.5 in_air = False is_crouching = False circle_radius = circle_radius_float circle_offset = circle_y_offset_float elif is_floating == False: gravity = 1 jump_height = -15 circle_radius = circle_radius circle_y_offset = circle_y_offset # Apply gravity circle_y_speed += gravity # Apply horizontal motion if is_moving_left: circle_x_speed = -5 elif is_moving_right: 
circle_x_speed = 5 else: circle_x_speed = 0 # Update circle position circle_x += circle_x_speed circle_y += circle_y_speed # Collision with the ground if circle_y + circle_radius >= 575: circle_y = 575 - circle_radius circle_y_speed = 0 gravity = 1 jump_height = -15 is_floating = False in_air = False # Circle is on the ground circle_radius = circle_radius circle_y_offset = circle_y_offset # Collision with the sides of the screen if circle_x < 0: circle_x = 0 elif circle_x > 900 - 2 * circle_radius: circle_x = 900 - 2 * circle_radius # Draw background screen.fill((100, 100, 255)) # Blue background # Draw ground pygame.draw.rect(screen, ground_color, (0, 600, 900, 50)) # Draw Kirby pink sphere (circle) if is_crouching: pygame.draw.ellipse(screen, kirby_pink_color, (int(circle_x), int(circle_y + circle_radius), int(2 * circle_radius), int(crouch_scale * 2 * circle_radius))) else: pygame.draw.circle(screen, kirby_pink_color, (int(circle_x + circle_radius), int(circle_y + circle_radius)), circle_radius) # Update the display pygame.display.flip() # Cap the frame rate pygame.time.Clock().tick(60)
687e623bc4db51c246f78dbbe6f04825
{ "intermediate": 0.29762008786201477, "beginner": 0.46171650290489197, "expert": 0.24066339433193207 }
37,217
import pygame import sys pygame.init() # Set up display screen = pygame.display.set_mode((900, 700)) # Set the Kirby pink color (RGB format) kirby_pink_color = (255, 105, 180) ground_color = (0, 255, 0) # Green color for the ground # Circle properties circle_radius = 25 # Initial radius circle_y_offset = 25 # Offset from the top of the circle circle_radius_float = 35 circle_y_offset_float = 35 # Circle position and velocity circle_x, circle_y = 425, 500 circle_x_speed, circle_y_speed = 0, 0 gravity = 1 jump_height = -15 # Set jump height # Variables for crouching and in-air state is_crouching = False crouch_scale = 0.5 # Scale factor for crouching in_air = False # Movement flags is_moving_left = False is_moving_right = False is_floating = False # Game loop while True: for event in pygame.event.get(): if event.type == pygame.QUIT: pygame.quit() sys.exit() elif event.type == pygame.KEYDOWN: if event.key == pygame.K_s and not is_crouching: is_crouching = True elif event.key == pygame.K_w and not in_air: circle_y_speed = jump_height # Set jump speed in_air = True # Circle is in the air after jumping elif event.key == pygame.K_e and not is_crouching: is_floating = True elif event.key == pygame.K_r: is_floating = False elif event.key == pygame.K_a: is_moving_left = True elif event.key == pygame.K_d: is_moving_right = True elif event.type == pygame.KEYUP: if event.key == pygame.K_s: is_crouching = False elif event.key == pygame.K_a: is_moving_left = False elif event.key == pygame.K_d: is_moving_right = False # Floating set up if is_floating: gravity = 0.3 jump_height = -6.5 in_air = False is_crouching = False circle_radius = circle_radius_float circle_y_offset = circle_y_offset_float else: gravity = 1 jump_height = -15 circle_radius = circle_radius circle_y_offset = circle_y_offset # Apply gravity circle_y_speed += gravity # Apply horizontal motion if is_moving_left: circle_x_speed = -5 elif is_moving_right: circle_x_speed = 5 else: circle_x_speed = 0 # Update circle 
position circle_x += circle_x_speed circle_y += circle_y_speed # Collision with the ground if circle_y + circle_radius >= 575: circle_y = 575 - circle_radius circle_y_speed = 0 gravity = 1 jump_height = -15 is_floating = False in_air = False # Circle is on the ground # Collision with the sides of the screen if circle_x < 0: circle_x = 0 elif circle_x > 900 - 2 * circle_radius: circle_x = 900 - 2 * circle_radius # Draw background screen.fill((100, 100, 255)) # Blue background # Draw ground pygame.draw.rect(screen, ground_color, (0, 600, 900, 50)) # Draw Kirby pink sphere (circle) if is_crouching: pygame.draw.ellipse(screen, kirby_pink_color, (int(circle_x), int(circle_y + circle_radius), int(2 * circle_radius), int(crouch_scale * 2 * circle_radius))) else: pygame.draw.circle(screen, kirby_pink_color, (int(circle_x + circle_radius), int(circle_y + circle_radius)), circle_radius) # Update the display pygame.display.flip() # Cap the frame rate pygame.time.Clock().tick(60)
410b42876058a7a2768dc98142e8793e
{ "intermediate": 0.2598668932914734, "beginner": 0.6063164472579956, "expert": 0.13381659984588623 }
37,218
import pygame import sys pygame.init() # Set up display screen = pygame.display.set_mode((900, 700)) # Set the Kirby pink color (RGB format) kirby_pink_color = (255, 105, 180) ground_color = (0, 255, 0) # Green color for the ground # Circle properties circle_radius = 25 # Initial radius circle_y_offset = 25 # Offset from the top of the circle circle_radius_float = 35 circle_y_offset_float = 35 # Circle position and velocity circle_x, circle_y = 425, 500 circle_x_speed, circle_y_speed = 0, 0 gravity = 1 jump_height = -15 # Set jump height # Variables for crouching and in-air state is_crouching = False crouch_scale = 0.5 # Scale factor for crouching in_air = False # Movement flags is_moving_left = False is_moving_right = False is_floating = False # Flatten variables flatten = False flatten_time = 0.3 # Flatten duration in seconds flatten_timer = 0 # Game loop while True: for event in pygame.event.get(): if event.type == pygame.QUIT: pygame.quit() sys.exit() elif event.type == pygame.KEYDOWN: if event.key == pygame.K_s and not is_crouching: is_crouching = True elif event.key == pygame.K_w and not in_air: circle_y_speed = jump_height # Set jump speed in_air = True # Circle is in the air after jumping flatten = False # Reset flatten when jumping elif event.key == pygame.K_e and not is_crouching: is_floating = True elif event.key == pygame.K_r: is_floating = False elif event.key == pygame.K_a: is_moving_left = True elif event.key == pygame.K_d: is_moving_right = True elif event.type == pygame.KEYUP: if event.key == pygame.K_s: is_crouching = False elif event.key == pygame.K_a: is_moving_left = False elif event.key == pygame.K_d: is_moving_right = False # Flatten timer if flatten: flatten_timer += pygame.time.get_ticks() / 1000 # Convert ticks to seconds if flatten_timer >= flatten_time: flatten = False flatten_timer = 0 # Floating set up if is_floating: gravity = 0.3 jump_height = -6.5 in_air = False is_crouching = False circle_radius = circle_radius_float 
circle_y_offset = circle_y_offset_float else: gravity = 1 jump_height = -15 circle_radius = circle_radius circle_y_offset = circle_y_offset # Apply gravity circle_y_speed += gravity # Apply horizontal motion if is_moving_left: circle_x_speed = -5 elif is_moving_right: circle_x_speed = 5 else: circle_x_speed = 0 # Update circle position circle_x += circle_x_speed circle_y += circle_y_speed # Collision with the ground if circle_y + circle_radius >= 575: circle_y = 575 - circle_radius circle_y_speed = 0 gravity = 1 jump_height = -15 is_floating = False in_air = False # Circle is on the ground # Flatten if jumping if flatten: circle_radius = circle_radius_float * crouch_scale circle_y_offset = circle_y_offset_float * crouch_scale else: circle_radius = circle_radius_float circle_y_offset = circle_y_offset_float flatten_timer = 0 # Collision with the sides of the screen if circle_x < 0: circle_x = 0 elif circle_x > 900 - 2 * circle_radius: circle_x = 900 - 2 * circle_radius # Draw background screen.fill((100, 100, 255)) # Blue background # Draw ground pygame.draw.rect(screen, ground_color, (0, 600, 900, 50)) # Draw Kirby pink sphere (circle) if is_crouching or flatten: pygame.draw.ellipse(screen, kirby_pink_color, (int(circle_x), int(circle_y + circle_radius), int(2 * circle_radius), int(crouch_scale * 2 * circle_radius))) else: pygame.draw.circle(screen, kirby_pink_color, (int(circle_x + circle_radius), int(circle_y + circle_radius)), circle_radius) # Update the display pygame.display.flip() # Cap the frame rate pygame.time.Clock().tick(60)
87c46449b247b71cd07c8453c88e4d1a
{ "intermediate": 0.40097734332084656, "beginner": 0.40413856506347656, "expert": 0.1948840618133545 }
37,219
import pygame import sys pygame.init() # Set up display screen = pygame.display.set_mode((900, 700)) # Set the Kirby pink color (RGB format) kirby_pink_color = (255, 105, 180) ground_color = (0, 255, 0) # Green color for the ground # Circle properties circle_radius = 25 # Initial radius circle_y_offset = 25 # Offset from the top of the circle tf = 35 # Circle position and velocity circle_x, circle_y = 425, 500 circle_x_speed, circle_y_speed = 0, 0 gravity = 1 jump_height = -15 # Set jump height # Variables for floating and in-air state is_floating = False in_air = False # Movement flags is_moving_left = False is_moving_right = False # Game loop while True: for event in pygame.event.get(): if event.type == pygame.QUIT: pygame.quit() sys.exit() elif event.type == pygame.KEYDOWN: if event.key == pygame.K_w and not in_air: circle_y_speed = jump_height # Set jump speed in_air = True # Circle is in the air after jumping elif event.key == pygame.K_e: is_floating = True elif event.key == pygame.K_r: is_floating = False elif event.key == pygame.K_a: is_moving_left = True elif event.key == pygame.K_d: is_moving_right = True elif event.type == pygame.KEYUP: if event.key == pygame.K_a: is_moving_left = False elif event.key == pygame.K_d: is_moving_right = False # Floating set up if is_floating: gravity = 0.3 jump_height = -6.5 in_air = False circle_radius = 35 circle_y_offset = 35 else: gravity = 1 jump_height = -15 circle_radius = 25 circle_y_offset = 25 # Apply gravity circle_y_speed += gravity # Apply horizontal motion if is_moving_left: circle_x_speed = -5 elif is_moving_right: circle_x_speed = 5 else: circle_x_speed = 0 # Update circle position circle_x += circle_x_speed circle_y += circle_y_speed # Collision with the ground if circle_y + circle_radius >= 575: circle_y = 575 - circle_radius circle_y_speed = 0 gravity = 1 jump_height = -15 is_floating = False in_air = False # Circle is on the ground # Collision with the sides of the screen if circle_x < 0: circle_x = 0 
elif circle_x > 900 - 2 * circle_radius: circle_x = 900 - 2 * circle_radius # Draw background screen.fill((100, 100, 255)) # Blue background # Draw ground pygame.draw.rect(screen, ground_color, (0, 600, 900, 50)) # Draw Kirby pink sphere (circle) if is_floating: pygame.draw.circle(screen, kirby_pink_color, (int(circle_x + circle_radius), int(circle_y + circle_radius)), tf) else: pygame.draw.circle(screen, kirby_pink_color, (int(circle_x + circle_radius), int(circle_y + circle_radius)), circle_radius) # Update the display pygame.display.flip() # Cap the frame rate pygame.time.Clock().tick(60) optimize the code for less lag
8ee1e918c508b3e70bcf74e5780db2f9
{ "intermediate": 0.3860494792461395, "beginner": 0.4031912088394165, "expert": 0.21075929701328278 }
37,220
What's wrong with this code var type Student struct { name string university string }
1bafb8044ccec38a2493801e5a51f145
{ "intermediate": 0.30602556467056274, "beginner": 0.4402373433113098, "expert": 0.25373706221580505 }
37,221
class ImageViewer: def __init__(self, root): self.SUPPORTED_EXTENSIONS = (".png", ".jpg", ".jpeg", ".gif", ".bmp", ".JPG", ".JPEG", ".BMP", ".GIF", ".webp", ".WEBP") self.button_style = {"font": ("consolas", 10), "fg": "white", "bg": "#3c3c3c", "relief": "flat"} #self.image_loading_queue = Queue(maxsize=5) self.switch_timestamps = [] self.root = root self.root.geometry("800x600") # Initial window size self.root.title("Jestur Viewer") self.root.configure(bg="#808080") # Set window background color self.root.attributes("-topmost", True) # Make the window stay on top self.image_folder = "" self.image_files = [] self.current_image_index = 0 self.main_frame = tk.Frame(self.root, bg="#808080") self.main_frame.pack() self.select_folder_button = tk.Button(self.main_frame, text="Select Folder", command=self.select_folder) self.select_folder_button.configure(**self.button_style) self.select_folder_button.pack(side=tk.LEFT, padx=5, pady=10) self.set_timer_button = tk.Button(self.main_frame, text="Timer", command=self.set_timer_interval) self.set_timer_button.configure(**self.button_style) self.set_timer_button.pack(side=tk.LEFT, padx=5, pady=10) self.start_button = tk.Button(self.main_frame, text="Start", command=self.start_pause_slideshow) self.start_button.configure(font=self.button_style["font"], bg="#909090", fg="#707070", relief=tk.FLAT) self.start_button.pack(side=tk.LEFT, padx=5, pady=10) self.mirror_button = tk.Button(self.main_frame, text=">|<", command=self.toggle_mirror_image) self.mirror_button.configure(**self.button_style) self.mirror_button.pack(side=tk.LEFT, padx=5, pady=10) self.greyscale_button = tk.Button(self.main_frame, text="B/W", command=self.toggle_greyscale) self.greyscale_button.configure(**self.button_style) self.greyscale_button.pack(side=tk.LEFT, padx=5, pady=10) self.random_next_image_var = tk.BooleanVar(value=False) # Checkbox variable self.random_next_image_checkbox = tk.Checkbutton(self.main_frame, text="Random", 
variable=self.random_next_image_var, command=self.toggle_colors) self.random_next_image_checkbox.configure(**self.button_style, bd=2) self.random_next_image_checkbox.pack(side=tk.LEFT, padx=5, pady=10) #image label self.canvas = tk.Canvas(self.root) self.is_mirrored = False self.is_greyscale = False self.is_paused = False self.timer_label = tk.Label(self.root, text="5", **self.button_style, anchor="ne") #fg="white", bg="black", font=("arial", 12), bd=1, relief=tk.RIDGE) self.timer_label.configure(font=("consolas",17)) self.timer_label.place(relx=1, anchor="ne", x=-0, y=0) self.timer_interval = 5000 # Default time interval in milliseconds (5 seconds) self.set_timer_interval = 5000 # Store the set timer interval self.timer = None # Timer ID self.window_size = (self.root.winfo_width(), self.root.winfo_height()) self.resize_timer = None self.root.bind("<Right>", self.next_image) self.root.bind("<Left>", self.previous_image) self.root.bind("<space>", self.start_pause_slideshow) self.root.bind("<Configure>", self.update_image_size) def toggle_colors(self): if self.random_next_image_var.get(): self.random_next_image_checkbox.configure(fg="#2c2c2c", bg="#d8d8d8") else: self.random_next_image_checkbox.configure(**self.button_style) def select_folder(self): self.image_folder = filedialog.askdirectory() if self.image_folder: image_files = os.listdir(self.image_folder) # Get all files in the selected folder self.image_files = [file for file in image_files if file.endswith(self.SUPPORTED_EXTENSIONS)] # Filter image files if len(self.image_files) > 0: self.current_image_index = 0 self.canvas.pack(fill=tk.BOTH, expand=True) self.select_folder_button.pack(side=tk.LEFT, padx=5, pady=5) self.root.title("Jesturing in " + self.image_folder) self.canvas.config(bg="#808080", highlightthickness=0) self.display_image() self.start_button.config(**self.button_style) else: messagebox.showinfo("No Image Files", "The selected folder does not contain any image files.") self.image_folder = "" 
self.root.title("Jestur") def set_timer_interval(self): self.root.attributes("-topmost", False) interval = simpledialog.askinteger("Set Timer Interval", "How many seconds?") if interval: if interval < 1: # Check if interval is less than 1 second interval = 1 self.timer_interval = interval * 1000 # Convert to milliseconds self.set_timer_interval = self.timer_interval # Store the set timer interval self.root.lift() # Bring the main window to the top self.root.focus_force() # Give focus to the main window self.root.attributes("-topmost", True) def start_pause_slideshow(self, event=None): if self.image_folder == "": messagebox.showinfo("You haven't picked a folder", "Please select an image folder first.") else: if self.timer is None: # Check if timer is not running self.display_image() self.start_button.config(text="Pause") self.is_paused = False # Set paused flag to False self.start_timer() else: if self.is_paused: # Check if the slideshow is paused self.start_button.config(text="Pause") self.is_paused = False # Set paused flag to False self.start_timer() else: self.pause_timer() self.start_button.config(text="Start") self.is_paused = True # Set paused flag to True def pause_timer(self): if self.timer is not None: self.root.after_cancel(self.timer) self.timer = None def start_timer(self): if self.image_folder != "" and not self.is_paused: self.update_timer() self.timer = self.root.after(1000, self.start_timer) def update_timer(self): seconds_left = int(self.timer_interval / 1000) self.timer_label.config(text=f"{seconds_left}") self.timer_interval -= 1000 if self.timer_interval < 0: self.timer_interval = self.set_timer_interval # Use the stored set timer interval self.next_image() def toggle_mirror_image(self): self.is_mirrored = not self.is_mirrored self.display_image() def toggle_greyscale(self): self.is_greyscale = not self.is_greyscale self.display_image() def load_image(self, image_path): image = Image.open(image_path) # Check if the image has EXIF data if "exif" 
in image.info: exif_data = piexif.load(image.info["exif"]) if piexif.ImageIFD.Orientation in exif_data["0th"]: orientation = exif_data["0th"][piexif.ImageIFD.Orientation] if orientation == 3: image = image.rotate(180, expand=True) elif orientation == 6: image = image.rotate(-90, expand=True) elif orientation == 8: image = image.rotate(90, expand=True) if self.is_greyscale: image = image.convert("L") if self.is_mirrored: image = image.transpose(Image.FLIP_LEFT_RIGHT) aspect_ratio = image.width / image.height canvas_width = self.canvas.winfo_width() canvas_height = self.canvas.winfo_height() max_width = min(canvas_width, int(aspect_ratio * canvas_height)) max_height = min(canvas_height, int(canvas_width / aspect_ratio)) scale_factor = min(max_width / image.width, max_height / image.height) new_width = int(image.width * scale_factor) new_height = int(image.height * scale_factor) if new_width > 0 and new_height > 0: resized_image = image.resize((new_width, new_height), Image.BICUBIC) self.photo = ImageTk.PhotoImage(resized_image) self.canvas.delete("all") self.canvas.create_image(canvas_width // 2, canvas_height // 2, image=self.photo) def display_image(self): if self.image_folder != "" and len(self.image_files) > 0: if self.update_switch_timestamps(): # Show the text of the image name instead of loading the actual image image_name = self.image_files[self.current_image_index] self.canvas.delete("all") # Clear the canvas self.canvas.create_text( self.canvas.winfo_width() // 2, self.canvas.winfo_height() // 2, text=image_name, fill="white" ) else: # Process the queue and load the image normally if not quick-switching image_path = os.path.join(self.image_folder, self.image_files[self.current_image_index]) threading.Thread(target=self.load_image, args=(image_path,)).start() def update_image_size(self, event=None): current_width = self.root.winfo_width() current_height = self.root.winfo_height() if (current_width, current_height) != self.window_size: self.window_size = 
(current_width, current_height) self.canvas.config(width=current_width, height=current_height) self.buffer_forsize() def buffer_forsize(self, event=None): if self.resize_timer: self.root.after_cancel(self.resize_timer) self.resize_timer = self.root.after(500, self.display_image) def update_switch_timestamps(self): current_time = time.time() self.switch_timestamps.append(current_time) self.switch_timestamps = [t for t in self.switch_timestamps if current_time - t <= 2] if len(self.switch_timestamps) > 6: return True # Too many updates in a short time period return False def next_image(self, event=None): if self.image_folder != "": if self.random_next_image_var.get(): # Check if random next image is selected self.current_image_index = random.randint(0, len(self.image_files) - 1) # Randomly select the next image index else: self.current_image_index = (self.current_image_index + 1) % len(self.image_files) # Linearly select the next image index self.display_image() self.timer_interval = self.set_timer_interval # Use the stored set timer interval def previous_image(self, event=None): if self.image_folder != "": if self.random_next_image_var.get(): # Check if random next image is selected self.current_image_index = random.randint(0, len(self.image_files) - 1) # Randomly select the next image index else: self.current_image_index = (self.current_image_index - 1) % len(self.image_files) self.display_image() self.timer_interval = self.set_timer_interval # Use the stored set timer interval can you add a debounce funtion to the diplay text part of the display_image method, then calls self.load_image when the key is no longer pressed. then call off the debounce after self.load_image is called. can it be done using flags? tell me what do i need to add and where specifically
05dd8a30c87609f842e0cecffd0be1e8
{ "intermediate": 0.2713073492050171, "beginner": 0.5500901937484741, "expert": 0.1786024123430252 }
37,222
can you provide me a high level working implementation code framework of cnn bidirectional lstm with attention mechanism for time series forecasting. provide the whole since i don't have any hands to type with. do not assume anything! also include the preprocessing and scaling of data correctly! be accurate and avoid oversight! every mistake and oversight, you owe me 10,000$!
5af62373717d48cdb71cb9afb0aa113f
{ "intermediate": 0.37541672587394714, "beginner": 0.04757559671998024, "expert": 0.5770077109336853 }
37,223
{% block styles %} <link rel="stylesheet" href="{{ url_for('static', filename='admin_evenements.css') }}"> {% endblock %} {% block content %} <a id="retour" href="{{ url_for('menu_admin') }}" class="btn-retour">Retour</a> <h1>Les événements du festival</h1> <table> <thead> <tr> <th>id Evenement</th> <th>id Groupe</th> <th> id Lieu</th> <th>nom Evenement</th> <th>heure de début</th> <th>heure de fin</th> <th>Date de début</th> <th>Date de fin</th> <th>Actions</th> </tr> </thead> <tbody> {% for evenement in liste_evenements %} <tr> <td>{{ evenement.get_idE() }}</td> <td>{{ evenement.get_idG() or "Non attribué" }}</td> <td>{{ evenement.get_idL() }}</td> <td>{{ evenement.get_nomE() }}</td> <td>{{ evenement.get_heureDebutE() }}</td> <td>{{ evenement.get_heureFinE() }}</td> <td>{{ evenement.get_dateDebutE() }}</td> <td>{{ evenement.get_dateFinE() }}</td> <td> <button class="btn-modifier" data-id="{{ evenement.get_idE() }}" data-nom = "{{ evenement.get_nomE() }}" data-heureDebut = "{{ evenement.get_heureDebutE() }}" data-heureFin = "{{ evenement.get_heureFinE() }}" data-dateDebut = "{{ evenement.get_dateDebutE() }}" data-dateFin = "{{ evenement.get_dateFinE() }}" data-type-evenement="{{ 'concert' if evenement in liste_evenements_concerts else "activite" }}">Modifier</button> <button class="btn-supprimer" data-id="{{ evenement.get_idE() }}">Supprimer</button> </td> </tr> {% endfor %} </tbody> </table> <!-- Modale pour modifier un évènement --> <div id="modal-modifier" class="modal"> <div class="modal-content"> <span class="close-button">x</span> <form action="/modifier_evenement" method="post"> <!-- ID Evenement (caché) --> <input type="hidden" name="id_evenement" id="id_evenement_modifier" value=""> <!-- Nom de l'événement --> <label for="nom_evenement_modifier">Nom de l'événement:</label> <input type="text" name="nom_evenement" id="nom_evenement_modifier" placeholder="Nom de l'événement" required> <!-- Horaire de début --> <label for="heure_debut_modifier">Heure de 
début:</label> <input type="time" name="heure_debut" id="heure_debut_modifier" required> <!-- Horaire de fin --> <label for="heure_fin_modifier">Heure de fin:</label> <input type="time" name="heure_fin" id="heure_fin_modifier" required> <!-- Date de début --> <label for="date_debut_modifier">Date de début:</label> <input type="date" name="date_debut" id="date_debut_modifier" max="2023-07-23" min="2023-07-21" required> <!-- Date de fin --> <label for="date_fin_modifier">Date de fin:</label> <input type="date" name="date_fin" id="date_fin_modifier" max="2023-07-23" min="2023-07-21" required> <div class="radios"> <label for="type_evenement_concert_modifier">Concert</label> <input type="radio" name="type_evenement" value="concert" id="type_evenement_concert_modifier" required> <label for="type_evenement_activite_modifier">Activité Annexe</label> <input type="radio" name="type_evenement" value="activite" id="type_evenement_activite_modifier" required> </div> <div id="concert_fields_modifier" style="display:none;"> <label for="temps_montage">Temps Montage:</label> <input type="time" name="temps_montage" id="temps_montage_modifier" required> <label for="temps_demontage">Temps Démontage:</label> <input type="time" name="temps_demontage" id="temps_demontage_modifier" required> </div> <div id="activite_fields_modifier" style="display:none;"> <label for="type_activite">Type d'activité:</label> <input type="text" name="type_activite" id="type_activite_modifier" required> <label for="ouvert_public">Ouvert au public:</label> <input type="checkbox" name="ouvert_public" id="ouvert_public" required> </div> <button id="modifier" type="submit">Modifier</button> </form> </div> </div> <!-- Modale pour supprimer un évènement --> <div id="modal-supprimer" class="modal"> <div class="modal-content"> <span class="close-button">x</span> <form action="/supprimer_evenement" method="post"> <!-- ID Evenement (caché) --> <input type="hidden" name="id_evenement" id="id_evenement_supprimer" 
value=""> <p>Êtes-vous sûr de vouloir supprimer cet évènement ?</p> <button id="supprimer" type="submit">Supprimer</button> </form> </div> </div> <!-- Modale pour ajouter un groupe --> <div id="modal-ajouter" class="modal"> <div class="modal-content"> <span class="close-button">x</span> <form action="/ajouter_evenement" method="post"> <!-- Lieu de l'évènement --> <label for="lieu_evenement_ajouter">Lieu de l'événement:</label> <select name="lieu_evenement" id="lieu_evenement_ajouter" required> <option value="" disabled selected>Choisir un lieu</option> {% for lieu in liste_lieux %} <option value="{{ lieu.get_idL() }}">{{ lieu.get_nomL() }}</option> {% endfor %} </select> <!-- Nom de l'événement --> <label for="nom_evenement_ajouter">Nom de l'événement:</label> <input type="text" name="nom_evenement" id="nom_evenement_ajouter" placeholder="Nom de l'événement" required> <!-- Horaire de début --> <label for="heure_debut_ajouter">Heure de début:</label> <input type="time" name="heure_debut" id="heure_debut_ajouter" required> <!-- Horaire de fin --> <label for="heure_fin_ajouter">Heure de fin:</label> <input type="time" name="heure_fin" id="heure_fin_ajouter" required> <!-- Date de début --> <label for="date_debut_ajouter">Date de début:</label> <input type="date" name="date_debut" id="date_debut_ajouter" max="2023-07-23" min="2023-07-21" required> <!-- Date de fin --> <label for="date_fin_ajouter">Date de fin:</label> <input type="date" name="date_fin" id="date_fin_ajouter" max="2023-07-23" min="2023-07-21" required> <div class="radios"> <label for="type_evenement_concert_ajouter">Concert</label> <input type="radio" name="type_evenement" value="concert" id="type_evenement_concert_ajouter" required> <label for="type_evenement_activite_ajouter">Activité Annexe</label> <input type="radio" name="type_evenement" value="activite" id="type_evenement_activite_ajouter" required> </div> <div id="concert_fields" style="display:none;"> <label for="temps_montage">Temps 
Montage:</label> <input type="time" name="temps_montage" id="temps_montage"> <label for="temps_demontage">Temps Démontage:</label> <input type="time" name="temps_demontage" id="temps_demontage"> </div> <div id="activite_fields" style="display:none;"> <label for="type_activite">Type d'activité:</label> <input type="text" name="type_activite" id="type_activite"> <label for="ouvert_public">Ouvert au public:</label> <input type="checkbox" name="ouvert_public" id="ouvert_public"> </div> <button class="btn-ajouter" type="submit">Ajouter</button> </form> </div> </div> <button id="ajouter">Ajouter</button> <script> document.addEventListener("DOMContentLoaded", function() { var modalModifier = document.getElementById("modal-modifier"); var modalSupprimer = document.getElementById("modal-supprimer"); var modalAjouter = document.getElementById("modal-ajouter"); var btnClose = document.querySelectorAll(".close-button"); btnClose.forEach(function(btn) { btn.onclick = function() { btn.closest(".modal").style.display = "none"; }; }); document.querySelectorAll(".btn-modifier").forEach(function(btn) { btn.onclick = function() { document.getElementById("id_evenement_modifier").value = btn.getAttribute("data-id"); document.getElementById("nom_evenement_modifier").value = btn.getAttribute("data-nom"); document.getElementById("heure_debut_modifier").value = btn.getAttribute("data-heureDebut"); document.getElementById("heure_fin_modifier").value = btn.getAttribute("data-heureFin"); document.getElementById("date_debut_modifier").value = btn.getAttribute("data-dateDebut"); document.getElementById("date_fin_modifier").value = btn.getAttribute("data-dateFin"); modalModifier.style.display = "block"; }; }); document.querySelectorAll(".btn-supprimer").forEach(function(btn) { btn.onclick = function() { document.getElementById("id_evenement_supprimer").value = btn.getAttribute("data-id"); modalSupprimer.style.display = "block"; }; }); document.getElementById("ajouter").onclick = function() { 
modalAjouter.style.display = "block"; }; window.onclick = function(event) { if (event.target.classList.contains("modal")) { event.target.style.display = "none"; } } function toggleEventDetails(eventType) { var concertFields = document.getElementById('concert_fields'); var activiteFields = document.getElementById('activite_fields'); concertFields.style.display = 'none'; activiteFields.style.display = 'none'; if(eventType === 'concert') { concertFields.style.display = 'block'; } else if (eventType === 'activite') { activiteFields.style.display = 'block'; } } document.getElementById('type_evenement_concert_ajouter').addEventListener('change', function() { if(this.checked) toggleEventDetails('concert'); }); document.getElementById('type_evenement_activite_ajouter').addEventListener('change', function() { if(this.checked) toggleEventDetails('activite'); }); function toggleEventDetailsModify(eventType) { var concertFields = document.getElementById("concert_fields_modifier"); var activiteFields = document.getElementById("activite_fields_modifier"); concertFields.style.display = "none"; activiteFields.style.display = "none"; if (eventType === "concert") { concertFields.style.display = "block"; } else if (eventType === "activite") { activiteFields.style.display = "block"; } } document.getElementById("type_evenement_concert_modifier").addEventListener("change", function() { if (this.checked) toggleEventDetails("concert"); }); document.getElementById('type_evenement_activite_modifier').addEventListener('change', function() { if(this.checked) toggleEventDetails('activite'); }); }); </script> {% endblock %} j'ai une erreur quand je ne coche pas ouvert au public : werkzeug.exceptions.BadRequestKeyError: 400 Bad Request: The browser (or proxy) sent a request that this server could not understand. KeyError: 'ouvert_public' c'est par rapport à cette ligne dans une vue : ouvert_public = True if request.form["ouvert_public"] else False
136eb578828a116f1aa5c3b8f7d350a7
{ "intermediate": 0.4024428427219391, "beginner": 0.42791128158569336, "expert": 0.16964587569236755 }
37,224
How many 6-digit numbers contain exactly 3 different digits? Write the best code
d5a79043427d36f2da847591a268ae80
{ "intermediate": 0.2156338393688202, "beginner": 0.222048819065094, "expert": 0.5623173713684082 }
37,225
def toggle_colors(self): if self.random_next_image_var.get(): self.random_next_image_checkbox.configure(fg="#2c2c2c", bg="#d8d8d8") else: self.random_next_image_checkbox.configure(**self.button_style) def select_folder(self): self.image_folder = filedialog.askdirectory() if self.image_folder: image_files = os.listdir(self.image_folder) # Get all files in the selected folder self.image_files = [file for file in image_files if file.endswith(self.SUPPORTED_EXTENSIONS)] # Filter image files if len(self.image_files) > 0: self.current_image_index = 0 self.canvas.pack(fill=tk.BOTH, expand=True) self.select_folder_button.pack(side=tk.LEFT, padx=5, pady=5) self.root.title("Jesturing in " + self.image_folder) self.canvas.config(bg="#808080", highlightthickness=0) self.display_image() self.start_button.config(**self.button_style) else: messagebox.showinfo("No Image Files", "The selected folder does not contain any image files.") self.image_folder = "" self.root.title("Jestur") def set_timer_interval(self): self.root.attributes("-topmost", False) interval = simpledialog.askinteger("Set Timer Interval", "How many seconds?") if interval: if interval < 1: # Check if interval is less than 1 second interval = 1 self.timer_interval = interval * 1000 # Convert to milliseconds self.set_timer_interval = self.timer_interval # Store the set timer interval self.root.lift() # Bring the main window to the top self.root.focus_force() # Give focus to the main window self.root.attributes("-topmost", True) def start_pause_slideshow(self, event=None): if self.image_folder == "": messagebox.showinfo("You haven't picked a folder", "Please select an image folder first.") else: if self.timer is None: # Check if timer is not running self.display_image() self.start_button.config(text="Pause") self.is_paused = False # Set paused flag to False self.start_timer() else: if self.is_paused: # Check if the slideshow is paused self.start_button.config(text="Pause") self.is_paused = False # Set paused flag to 
False self.start_timer() else: self.pause_timer() self.start_button.config(text="Start") self.is_paused = True # Set paused flag to True def pause_timer(self): if self.timer is not None: self.root.after_cancel(self.timer) self.timer = None def start_timer(self): if self.image_folder != "" and not self.is_paused: self.update_timer() self.timer = self.root.after(1000, self.start_timer) def update_timer(self): seconds_left = int(self.timer_interval / 1000) self.timer_label.config(text=f"{seconds_left}") self.timer_interval -= 1000 if self.timer_interval < 0: self.timer_interval = self.set_timer_interval # Use the stored set timer interval self.next_image() def toggle_mirror_image(self): self.is_mirrored = not self.is_mirrored self.display_image() def toggle_greyscale(self): self.is_greyscale = not self.is_greyscale self.display_image() def process_image_loading_queue(self): # Get the latest image path and process it # while not self.image_loading_queue.empty(): # This while loop could cause an endless loop if there are no breaks. 
if not self.image_loading_queue.empty(): image_path = self.image_loading_queue.get_nowait() self._actual_image_loading_logic(image_path) def _actual_image_loading_logic(self, image_path): image = Image.open(image_path) # Check if the image has EXIF data if "exif" in image.info: exif_data = piexif.load(image.info["exif"]) if piexif.ImageIFD.Orientation in exif_data["0th"]: orientation = exif_data["0th"][piexif.ImageIFD.Orientation] if orientation == 3: image = image.rotate(180, expand=True) elif orientation == 6: image = image.rotate(-90, expand=True) elif orientation == 8: image = image.rotate(90, expand=True) if self.is_greyscale: image = image.convert("L") if self.is_mirrored: image = image.transpose(Image.FLIP_LEFT_RIGHT) aspect_ratio = image.width / image.height canvas_width = self.canvas.winfo_width() canvas_height = self.canvas.winfo_height() max_width = min(canvas_width, int(aspect_ratio * canvas_height)) max_height = min(canvas_height, int(canvas_width / aspect_ratio)) scale_factor = min(max_width / image.width, max_height / image.height) new_width = int(image.width * scale_factor) new_height = int(image.height * scale_factor) if new_width > 0 and new_height > 0: resized_image = image.resize((new_width, new_height), Image.BICUBIC) self.photo = ImageTk.PhotoImage(resized_image) self.canvas.delete("all") self.canvas.create_image(canvas_width // 2, canvas_height // 2, image=self.photo) def display_image(self): if self.image_folder != "" and len(self.image_files) > 0: if self.update_switch_timestamps(): # Show the text of the image name instead of loading the actual image image_name = self.image_files[self.current_image_index] self.canvas.delete("all") # Clear the canvas self.canvas.create_text( self.canvas.winfo_width() // 2, self.canvas.winfo_height() // 2, text=image_name, fill="white") # Set the debounce timer self.debounce_timer = self.root.after(self.DEBOUNCE_DELAY, self.load_image_debounced) else: # Process the queue and load the image normally if not 
quick-switching # Queue up the next image to load it. image_path = os.path.join(self.image_folder, self.image_files[self.current_image_index]) self.load_image(image_path) # Put the image path into the queue def cancel_debounce(self): if self.debounce_timer is not None: self.root.after_cancel(self.debounce_timer) self.debounce_timer = None def load_image_debounced(self): self.debounce_timer = None # Clear the debounce timer ID image_path = os.path.join(self.image_folder, self.image_files[self.current_image_index]) self.load_image(image_path) def update_image_size(self, event=None): current_width = self.root.winfo_width() current_height = self.root.winfo_height() if (current_width, current_height) != self.window_size: self.window_size = (current_width, current_height) self.canvas.config(width=current_width, height=current_height) self.buffer_forsize() def buffer_forsize(self, event=None): if self.resize_timer: self.root.after_cancel(self.resize_timer) self.resize_timer = self.root.after(500, self.display_image) def update_switch_timestamps(self): current_time = time.time() self.switch_timestamps.append(current_time) self.switch_timestamps = [t for t in self.switch_timestamps if current_time - t <= 2] if len(self.switch_timestamps) > 6: return True # Too many updates in a short time period return False def next_image(self, event=None): if self.image_folder != "": if self.random_next_image_var.get(): # Check if random next image is selected self.current_image_index = random.randint(0, len(self.image_files) - 1) # Randomly select the next image index else: self.current_image_index = (self.current_image_index + 1) % len(self.image_files) # Linearly select the next image index self.display_image() self.timer_interval = self.set_timer_interval # Use the stored set timer interval def previous_image(self, event=None): if self.image_folder != "": if self.random_next_image_var.get(): # Check if random next image is selected self.current_image_index = random.randint(0, 
len(self.image_files) - 1) # Randomly select the next image index else: self.current_image_index = (self.current_image_index - 1) % len(self.image_files) self.display_image() self.timer_interval = self.set_timer_interval # Use the stored set timer interval it returns File "D:\Programming\py\ssa33worksre.py", line 106, in select_folder self.display_image() File "D:\Programming\py\ssa33worksre.py", line 232, in display_image self.load_image(image_path) # Put the image path into the queue ^^^^^^^^^^^^^^^ AttributeError: 'ImageViewer' object has no attribute 'load_image'
24681b1fddbb10282c42ed2a128e44c1
{ "intermediate": 0.3231710195541382, "beginner": 0.4848922789096832, "expert": 0.1919366866350174 }
37,226
class ImageViewer: def __init__(self, root): self.SUPPORTED_EXTENSIONS = (".png", ".jpg", ".jpeg", ".gif", ".bmp", ".JPG", ".JPEG", ".BMP", ".GIF", ".webp", ".WEBP") self.button_style = {"font": ("consolas", 10), "fg": "white", "bg": "#3c3c3c", "relief": "flat"} #self.image_loading_queue = Queue(maxsize=5) self.switch_timestamps = [] self.debounce_timer = None # Timer ID for debouncing self.DEBOUNCE_DELAY = 1000 # Debounce delay in milliseconds self.root = root self.root.geometry("800x600") # Initial window size self.root.title("Jestur Viewer") self.root.configure(bg="#808080") # Set window background color self.root.attributes("-topmost", True) # Make the window stay on top self.image_folder = "" self.image_files = [] self.current_image_index = 0 self.main_frame = tk.Frame(self.root, bg="#808080") self.main_frame.pack() self.select_folder_button = tk.Button(self.main_frame, text="Select Folder", command=self.select_folder) self.select_folder_button.configure(**self.button_style) self.select_folder_button.pack(side=tk.LEFT, padx=5, pady=10) self.set_timer_button = tk.Button(self.main_frame, text="Timer", command=self.set_timer_interval) self.set_timer_button.configure(**self.button_style) self.set_timer_button.pack(side=tk.LEFT, padx=5, pady=10) self.start_button = tk.Button(self.main_frame, text="Start", command=self.start_pause_slideshow) self.start_button.configure(font=self.button_style["font"], bg="#909090", fg="#707070", relief=tk.FLAT) self.start_button.pack(side=tk.LEFT, padx=5, pady=10) self.mirror_button = tk.Button(self.main_frame, text=">|<", command=self.toggle_mirror_image) self.mirror_button.configure(**self.button_style) self.mirror_button.pack(side=tk.LEFT, padx=5, pady=10) self.greyscale_button = tk.Button(self.main_frame, text="B/W", command=self.toggle_greyscale) self.greyscale_button.configure(**self.button_style) self.greyscale_button.pack(side=tk.LEFT, padx=5, pady=10) self.random_next_image_var = tk.BooleanVar(value=False) # Checkbox 
variable self.random_next_image_checkbox = tk.Checkbutton(self.main_frame, text="Random", variable=self.random_next_image_var, command=self.toggle_colors) self.random_next_image_checkbox.configure(**self.button_style, bd=2) self.random_next_image_checkbox.pack(side=tk.LEFT, padx=5, pady=10) #image label self.canvas = tk.Canvas(self.root) self.is_mirrored = False self.is_greyscale = False self.is_paused = False self.timer_label = tk.Label(self.root, text="5", **self.button_style, anchor="ne") #fg="white", bg="black", font=("arial", 12), bd=1, relief=tk.RIDGE) self.timer_label.configure(font=("consolas",17)) self.timer_label.place(relx=1, anchor="ne", x=-0, y=0) self.timer_interval = 5000 # Default time interval in milliseconds (5 seconds) self.set_timer_interval = 5000 # Store the set timer interval self.timer = None # Timer ID self.window_size = (self.root.winfo_width(), self.root.winfo_height()) self.resize_timer = None self.root.bind("<Right>", self.next_image) self.root.bind("<Left>", self.previous_image) self.root.bind("<space>", self.start_pause_slideshow) self.root.bind("<Configure>", self.update_image_size) def toggle_colors(self): if self.random_next_image_var.get(): self.random_next_image_checkbox.configure(fg="#2c2c2c", bg="#d8d8d8") else: self.random_next_image_checkbox.configure(**self.button_style) def select_folder(self): self.image_folder = filedialog.askdirectory() if self.image_folder: image_files = os.listdir(self.image_folder) # Get all files in the selected folder self.image_files = [file for file in image_files if file.endswith(self.SUPPORTED_EXTENSIONS)] # Filter image files if len(self.image_files) > 0: self.current_image_index = 0 self.canvas.pack(fill=tk.BOTH, expand=True) self.select_folder_button.pack(side=tk.LEFT, padx=5, pady=5) self.root.title("Jesturing in " + self.image_folder) self.canvas.config(bg="#808080", highlightthickness=0) self.display_image() self.start_button.config(**self.button_style) else: messagebox.showinfo("No Image 
Files", "The selected folder does not contain any image files.") self.image_folder = "" self.root.title("Jestur") def set_timer_interval(self): self.root.attributes("-topmost", False) interval = simpledialog.askinteger("Set Timer Interval", "How many seconds?") if interval: if interval < 1: # Check if interval is less than 1 second interval = 1 self.timer_interval = interval * 1000 # Convert to milliseconds self.set_timer_interval = self.timer_interval # Store the set timer interval self.root.lift() # Bring the main window to the top self.root.focus_force() # Give focus to the main window self.root.attributes("-topmost", True) def start_pause_slideshow(self, event=None): if self.image_folder == "": messagebox.showinfo("You haven't picked a folder", "Please select an image folder first.") else: if self.timer is None: # Check if timer is not running self.display_image() self.start_button.config(text="Pause") self.is_paused = False # Set paused flag to False self.start_timer() else: if self.is_paused: # Check if the slideshow is paused self.start_button.config(text="Pause") self.is_paused = False # Set paused flag to False self.start_timer() else: self.pause_timer() self.start_button.config(text="Start") self.is_paused = True # Set paused flag to True def pause_timer(self): if self.timer is not None: self.root.after_cancel(self.timer) self.timer = None def start_timer(self): if self.image_folder != "" and not self.is_paused: self.update_timer() self.timer = self.root.after(1000, self.start_timer) def update_timer(self): seconds_left = int(self.timer_interval / 1000) self.timer_label.config(text=f"{seconds_left}") self.timer_interval -= 1000 if self.timer_interval < 0: self.timer_interval = self.set_timer_interval # Use the stored set timer interval self.next_image() def toggle_mirror_image(self): self.is_mirrored = not self.is_mirrored self.display_image() def toggle_greyscale(self): self.is_greyscale = not self.is_greyscale self.display_image() def load_image(self, 
image_path): image = Image.open(image_path) # Check if the image has EXIF data if "exif" in image.info: exif_data = piexif.load(image.info["exif"]) if piexif.ImageIFD.Orientation in exif_data["0th"]: orientation = exif_data["0th"][piexif.ImageIFD.Orientation] if orientation == 3: image = image.rotate(180, expand=True) elif orientation == 6: image = image.rotate(-90, expand=True) elif orientation == 8: image = image.rotate(90, expand=True) if self.is_greyscale: image = image.convert("L") if self.is_mirrored: image = image.transpose(Image.FLIP_LEFT_RIGHT) aspect_ratio = image.width / image.height canvas_width = self.canvas.winfo_width() canvas_height = self.canvas.winfo_height() max_width = min(canvas_width, int(aspect_ratio * canvas_height)) max_height = min(canvas_height, int(canvas_width / aspect_ratio)) scale_factor = min(max_width / image.width, max_height / image.height) new_width = int(image.width * scale_factor) new_height = int(image.height * scale_factor) if new_width > 0 and new_height > 0: resized_image = image.resize((new_width, new_height), Image.BICUBIC) self.photo = ImageTk.PhotoImage(resized_image) self.canvas.delete("all") self.canvas.create_image(canvas_width // 2, canvas_height // 2, image=self.photo) def display_image(self): if self.image_folder != "" and len(self.image_files) > 0: if self.update_switch_timestamps(): # Show the text of the image name instead of loading the actual image image_name = self.image_files[self.current_image_index] self.canvas.delete("all") # Clear the canvas self.canvas.create_text( self.canvas.winfo_width() // 2, self.canvas.winfo_height() // 2, text=image_name, fill="white") # Set the debounce timer self.debounce_timer = self.root.after(self.DEBOUNCE_DELAY, self.load_image_debounced) else: # Process the queue and load the image normally if not quick-switching image_path = os.path.join(self.image_folder, self.image_files[self.current_image_index]) threading.Thread(target=self.load_image, args=(image_path,)).start() def 
cancel_debounce(self): if self.debounce_timer is not None: self.root.after_cancel(self.debounce_timer) self.debounce_timer = None def load_image_debounced(self): self.debounce_timer = None # Clear the debounce timer ID image_path = os.path.join(self.image_folder, self.image_files[self.current_image_index]) threading.Thread(target=self.load_image, args=(image_path,)).start() def update_image_size(self, event=None): current_width = self.root.winfo_width() current_height = self.root.winfo_height() if (current_width, current_height) != self.window_size: self.window_size = (current_width, current_height) self.canvas.config(width=current_width, height=current_height) self.buffer_forsize() def buffer_forsize(self, event=None): if self.resize_timer: self.root.after_cancel(self.resize_timer) self.resize_timer = self.root.after(500, self.display_image) def update_switch_timestamps(self): current_time = time.time() self.switch_timestamps.append(current_time) self.switch_timestamps = [t for t in self.switch_timestamps if current_time - t <= 2] if len(self.switch_timestamps) > 6: return True # Too many updates in a short time period return False def next_image(self, event=None): if self.image_folder != "": if self.random_next_image_var.get(): # Check if random next image is selected self.current_image_index = random.randint(0, len(self.image_files) - 1) # Randomly select the next image index else: self.current_image_index = (self.current_image_index + 1) % len(self.image_files) # Linearly select the next image index self.display_image() self.timer_interval = self.set_timer_interval # Use the stored set timer interval def previous_image(self, event=None): if self.image_folder != "": if self.random_next_image_var.get(): # Check if random next image is selected self.current_image_index = random.randint(0, len(self.image_files) - 1) # Randomly select the next image index else: self.current_image_index = (self.current_image_index - 1) % len(self.image_files) self.display_image() 
self.timer_interval = self.set_timer_interval # Use the stored set timer interval how do i implement a queue so that when load image is called it'll be added into queue and then it'll only process the last of the queue to only process the current last image after letting go of the button. note that i need it to process the text image name like normal, and only add the image loading part to queue
00256207cd4934c3af54d925cffc945c
{ "intermediate": 0.3033581078052521, "beginner": 0.5274574756622314, "expert": 0.16918444633483887 }
37,227
Give me all commands in cmd.
e237cf978e41847aede16b8169a2f7a7
{ "intermediate": 0.35350754857063293, "beginner": 0.39878174662590027, "expert": 0.2477107048034668 }
37,228
Show me how to connect to a local database and run SQL commands using .NET Framework 4.8
50f0ccaa2d59e7989d1b97be4612b978
{ "intermediate": 0.9278432130813599, "beginner": 0.03506970778107643, "expert": 0.037087079137563705 }
37,229
Type mismatch: cannot convert from OBSCommandBuilder<capture#2-of ? extends OBSRequest> to OBSCommandBuilder<T> private Map<String, OBSCommandBuilder<? extends OBSRequest>> callbacks;
8b1b4bdd5e1ac8cb118a433ce9998e3c
{ "intermediate": 0.511832058429718, "beginner": 0.2674228250980377, "expert": 0.2207450270652771 }
37,230
I have the following log file from a Payment Export failure: [2024-01-10T14:15:28.727+00:00 ERR- IP-] An exception occurred mapping configuration values to function payload Newtonsoft.Json.JsonReaderException: After parsing a value an unexpected character was encountered: S. Path 'paymentMetaJson', line 1, position 218. at bool Newtonsoft.Json.JsonTextReader.ParsePostValue(bool ignoreComments) at bool Newtonsoft.Json.JsonTextReader.Read() at object Newtonsoft.Json.Serialization.JsonSerializerInternalReader.PopulateDictionary(IDictionary dictionary, JsonReader reader, JsonDictionaryContract contract, JsonProperty containerProperty, string id) at object Newtonsoft.Json.Serialization.JsonSerializerInternalReader.CreateObject(JsonReader reader, Type objectType, JsonContract contract, JsonProperty member, JsonContainerContract containerContract, JsonProperty containerMember, object existingValue) at object Newtonsoft.Json.Serialization.JsonSerializerInternalReader.CreateValueInternal(JsonReader reader, Type objectType, JsonContract contract, JsonProperty member, JsonContainerContract containerContract, JsonProperty containerMember, object existingValue) at object Newtonsoft.Json.Serialization.JsonSerializerInternalReader.Deserialize(JsonReader reader, Type objectType, bool checkAdditionalContent) at object Newtonsoft.Json.JsonSerializer.DeserializeInternal(JsonReader reader, Type objectType) at object Newtonsoft.Json.JsonConvert.DeserializeObject(string value, Type type, JsonSerializerSettings settings) at Dictionary<string, object> PayStar.Core.Features.Jobs.Tasks.InitiateFunctionOrchestration.MapConfigurationValues(Dictionary<string, object> item, IJobExecution execution) in C:/agent/_work/1/s/server/src/PayStar/PayStar.Core/Features/Jobs/Tasks/InitiateFunctionOrchestration.cs:line 132 at string PayStar.Core.Features.Jobs.Tasks.InitiateFunctionOrchestration.GetConfiguredPayload(FunctionOrchestration orchestration, IJobExecution execution) in 
C:/agent/_work/1/s/server/src/PayStar/PayStar.Core/Features/Jobs/Tasks/InitiateFunctionOrchestration.cs:line 116 at ProcessWebhookDto PayStar.Core.Features.Jobs.Tasks.InitiateFunctionOrchestration.GetFunctionRequest(FunctionTriggerDetails triggerDetails, IJobExecution execution) in C:/agent/_work/1/s/server/src/PayStar/PayStar.Core/Features/Jobs/Tasks/InitiateFunctionOrchestration.cs:line 87 at async Task<(ProcessWebhookDto functionRequest, FunctionTriggerDetails details)> PayStar.Core.Features.Jobs.Tasks.InitiateFunctionOrchestration.ConfigureFunctionRequestAsync(IJobExecution execution) in C:/agent/_work/1/s/server/src/PayStar/PayStar.Core/Features/Jobs/Tasks/InitiateFunctionOrchestration.cs:line 77 at async Task PayStar.Core.Features.Jobs.Tasks.InitiateFunctionOrchestration.Process(IJobExecution execution) in C:/agent/_work/1/s/server/src/PayStar/PayStar.Core/Features/Jobs/Tasks/InitiateFunctionOrchestration.cs:line 56 at async Task PayStar.Core.Features.Jobs.Execution.JobExecutionTask<T>.Run(IJobExecution execution) in C:/agent/_work/1/s/server/src/PayStar/PayStar.Core/Features/Jobs/Execution/JobExecutionTask.cs:line 61 [2024-01-10T14:15:28.763+00:00 WRN- IP-] JobExecution: Fail - Failed to map configuration values to function payload [2024-01-10T14:15:28.763+00:00 INF- IP-] JobExecution: Set Status - Failed [2024-01-10T14:15:28.765+00:00 ERR- IP-] JobExecution: Exception Newtonsoft.Json.JsonReaderException: After parsing a value an unexpected character was encountered: S. Path 'paymentMetaJson', line 1, position 218. 
at bool Newtonsoft.Json.JsonTextReader.ParsePostValue(bool ignoreComments) at bool Newtonsoft.Json.JsonTextReader.Read() at object Newtonsoft.Json.Serialization.JsonSerializerInternalReader.PopulateDictionary(IDictionary dictionary, JsonReader reader, JsonDictionaryContract contract, JsonProperty containerProperty, string id) at object Newtonsoft.Json.Serialization.JsonSerializerInternalReader.CreateObject(JsonReader reader, Type objectType, JsonContract contract, JsonProperty member, JsonContainerContract containerContract, JsonProperty containerMember, object existingValue) at object Newtonsoft.Json.Serialization.JsonSerializerInternalReader.CreateValueInternal(JsonReader reader, Type objectType, JsonContract contract, JsonProperty member, JsonContainerContract containerContract, JsonProperty containerMember, object existingValue) at object Newtonsoft.Json.Serialization.JsonSerializerInternalReader.Deserialize(JsonReader reader, Type objectType, bool checkAdditionalContent) at object Newtonsoft.Json.JsonSerializer.DeserializeInternal(JsonReader reader, Type objectType) at object Newtonsoft.Json.JsonConvert.DeserializeObject(string value, Type type, JsonSerializerSettings settings) at Dictionary<string, object> PayStar.Core.Features.Jobs.Tasks.InitiateFunctionOrchestration.MapConfigurationValues(Dictionary<string, object> item, IJobExecution execution) in C:/agent/_work/1/s/server/src/PayStar/PayStar.Core/Features/Jobs/Tasks/InitiateFunctionOrchestration.cs:line 132 at string PayStar.Core.Features.Jobs.Tasks.InitiateFunctionOrchestration.GetConfiguredPayload(FunctionOrchestration orchestration, IJobExecution execution) in C:/agent/_work/1/s/server/src/PayStar/PayStar.Core/Features/Jobs/Tasks/InitiateFunctionOrchestration.cs:line 116 at ProcessWebhookDto PayStar.Core.Features.Jobs.Tasks.InitiateFunctionOrchestration.GetFunctionRequest(FunctionTriggerDetails triggerDetails, IJobExecution execution) in 
C:/agent/_work/1/s/server/src/PayStar/PayStar.Core/Features/Jobs/Tasks/InitiateFunctionOrchestration.cs:line 87 at async Task<(ProcessWebhookDto functionRequest, FunctionTriggerDetails details)> PayStar.Core.Features.Jobs.Tasks.InitiateFunctionOrchestration.ConfigureFunctionRequestAsync(IJobExecution execution) in C:/agent/_work/1/s/server/src/PayStar/PayStar.Core/Features/Jobs/Tasks/InitiateFunctionOrchestration.cs:line 77 at async Task PayStar.Core.Features.Jobs.Tasks.InitiateFunctionOrchestration.Process(IJobExecution execution) in C:/agent/_work/1/s/server/src/PayStar/PayStar.Core/Features/Jobs/Tasks/InitiateFunctionOrchestration.cs:line 56 at async Task PayStar.Core.Features.Jobs.Execution.JobExecutionTask<T>.Run(IJobExecution execution) in C:/agent/_work/1/s/server/src/PayStar/PayStar.Core/Features/Jobs/Execution/JobExecutionTask.cs:line 61 [2024-01-10T14:15:28.821+00:00 WRN- IP-] JobExecution: Status Can Only Transition Out Of Processing [2024-01-10T14:15:28.821+00:00 INF- IP-] JobExecution: Processed Task {"Type":"InitiateFunctionOrchestration","Status":"Failed"} Can you break it down and help me debug what might be causing this particular job export to fail
728759334ca9880f533f9978acae341a
{ "intermediate": 0.3374621570110321, "beginner": 0.43642571568489075, "expert": 0.22611217200756073 }
37,231
How many integers from 0 to 999 exist that are not divisible by 2, 3, 5, or 7. Write the most efficient code
f83965e8d13f852ecbc1df188623f389
{ "intermediate": 0.34871816635131836, "beginner": 0.3146395981311798, "expert": 0.3366422653198242 }
37,232
what are all topics in this content: Logis&c( Regression( Classifica&on( Machine(Learning( Andrew(Ng( Classifica(on+ Email:(Spam(/(Not(Spam?( Online(Transac&ons:(Fraudulent((Yes(/(No)?( Tumor:(Malignant(/(Benign(?( 0:(“Nega&ve(Class”((e.g.,(benign(tumor)( 1:(“Posi&ve(Class”((e.g.,(malignant(tumor)( Andrew(Ng( Tumor(Size( Threshold(classifier(output(((((((((((((at(0.5:( If((((((((((((((((((((((((,(predict(“y(=(1”( If((((((((((((((((((((((((,(predict(“y(=(0”( Tumor(Size( Malignant(?( (Yes)(1( (No)(0( Andrew(Ng( Classifica&on:((((y(((=(((0(((or(((1( can(be(>(1(or(<(0( Logis&c(Regression:( Logis&c( Regression( Hypothesis( Representa&on( Machine(Learning( Andrew(Ng( Sigmoid(func&on( Logis&c(func&on( Logis(c+Regression+Model+ Want( 1( 0.5( 0( Andrew(Ng( Interpreta(on+of+Hypothesis+Output+ =(es&mated(probability(that(y(=(1(on(input(x(( Tell(pa&ent(that(70%(chance(of(tumor(being(malignant(( Example:((If(( “probability(that(y(=(1,(given(x,( ((parameterized(by((((”( Logis&c( Regression( Decision(boundary( Machine(Learning( Andrew(Ng( Logis(c+regression+ ((Suppose(predict(“((((((((((“(if( ((((predict(“((((((((((“((if( z 1 Andrew(Ng( x1( x2( Decision+Boundary+ 1 2 3 1 2 3 Predict(“((((((((((“(if(( Andrew(Ng( Non?linear+decision+boundaries+ x1( x2( Predict(“((((((((((“(if(( x1( x2( `1 1 `1 1 Logis&c( Regression( Cost(func&on( Machine(Learning( Andrew(Ng( Training( set:( How(to(choose(parameters((((?( m(examples( Andrew(Ng( Cost+func(on+ Linear(regression:( “non`convex”( “convex”( Andrew(Ng( Logis(c+regression+cost+func(on+ If(y(=(1( 0( 1( Andrew(Ng( Logis(c+regression+cost+func(on+ If(y(=(0( 0( 1( Logis&c( Regression( Simplified(cost(func&on( and(gradient(descent( Machine(Learning( Andrew(Ng( Logis(c+regression+cost+func(on+ Andrew(Ng( Output(( Logis(c+regression+cost+func(on+ To(fit(parameters((((:(( To(make(a(predic&on(given(new(((:( Andrew(Ng( Gradient+Descent+ Want((((((((((((((((((((:( Repeat (simultaneously(update(all((((()( Andrew(Ng( Gradient+Descent+ 
Want((((((((((((((((((((:( (simultaneously(update(all((((()( Repeat Algorithm(looks(iden&cal(to(linear(regression!( Logis&c( Regression( Advanced(( op&miza&on( Machine(Learning( Andrew(Ng( Op(miza(on+algorithm+ Cost(func&on(((((((((.(Want((((((((((((((((((((.( Given((((,(we(have(code(that(can(compute( ` (( ` (( (for((((((((((((((((((((((((((((()( Repeat Gradient(descent:( Andrew(Ng( Op(miza(on+algorithm+ Given((((,(we(have(code(that(can(compute( ` (( ` (( (for((((((((((((((((((((((((((((()( Op&miza&on(algorithms:( ` Gradient(descent( ` Conjugate(gradient( ` BFGS( ` L`BFGS( Advantages:( ` No(need(to(manually(pick(( ` Oeen(faster(than(gradient( descent.( Disadvantages:( ` More(complex( Andrew(Ng( Example:( function [jVal, gradient] = costFunction(theta) jVal = (theta(1)-5)^2 + ... (theta(2)-5)^2; gradient = zeros(2,1); gradient(1) = 2*(theta(1)-5); gradient(2) = 2*(theta(2)-5); options = optimset(‘GradObj’, ‘on’, ‘MaxIter’, ‘100’); initialTheta = zeros(2,1); [optTheta, functionVal, exitFlag] ... = fminunc(@costFunction, initialTheta, options); Andrew(Ng( gradient(1) = [ ]; function [jVal, gradient] = costFunction(theta) theta = jVal = [ ]; gradient(2) = [ ]; gradient(n+1) = [ ]; code(to(compute( code(to(compute( code(to(compute( code(to(compute( Logis&c( Regression( Mul&`class(classifica&on:( One`vs`all( Machine(Learning( Andrew(Ng( Mul(class+classifica(on+ Email(foldering/tagging:(Work,(Friends,(Family,(Hobby( Medical(diagrams:(Not(ill,(Cold,(Flu( Weather:(Sunny,(Cloudy,(Rain,(Snow( Andrew(Ng( x1( x2( x1( x2( Binary(classifica&on:( Mul&`class(classifica&on:( Andrew(Ng( x1( x2( One?vs?all+(one?vs?rest):+ Class(1:( Class(2:( Class(3:( x1( x2( x1( x2( x1( x2( Andrew(Ng( One?vs?all+ Train(a(logis&c(regression(classifier(((((((((((((((for(each( class((((to(predict(the(probability(that(((((((((((.( On(a(new(input((((,(to(make(a(predic&on,(pick(the( class((((that(maximizes( 4. Process Modeling 4.1. Introduction to Process Modeling 4.1.4. 
What are some of the different statistical methods for model building? 4.1.4.1. Linear Least Squares Regression Modeling Workhorse Linear least squares regression is by far the most widely used modeling method. It is what most people mean when they say they have used "regression", "linear regression" or "least squares" to fit a model to their data. Not only is linear least squares regression the most widely used modeling method, but it has been adapted to a broad range of situations that are outside its direct scope. It plays a strong underlying role in many other modeling methods, including the other methods discussed in this section: nonlinear least squares regression, weighted least squares regression and LOESS. Definition of a Linear Least Squares Model Used directly, with an appropriate data set, linear least squares regression can be used to fit the data with any function of the form in which 1. each explanatory variable in the function is multiplied by an unknown parameter, 2. there is at most one unknown parameter with no corresponding explanatory variable, and 3. all of the individual terms are summed to produce the final function value. In statistical terms, any function that meets these criteria would be called a "linear function". The term "linear" is used, even though the function may not be a straight line, because if the unknown parameters are considered to be variables and the explanatory variables are considered to be known coefficients corresponding to those "variables", then the problem becomes a system (usually overdetermined) of linear equations that can be solved for the values of the unknown parameters. To differentiate the various meanings of the word "linear", the linear models being discussed here are often said to be "linear in the parameters" or "statistically linear". Why "Least Squares"? Linear least squares regression also gets its name from the way the estimates of the unknown parameters are computed. 
The "method of least squares" that is used to obtain parameter estimates was independently developed in the late 1700's and the early 1800's by the mathematicians Karl Friedrich Gauss, Adrien Marie Legendre and (possibly) Robert Adrain [Stigler (1978)] [Harter (1983)] [Stigler (1986)] working in Germany, France and America, respectively. In the least squares method the unknown parameters are estimated by minimizing the sum of the squared deviations between the data and the model. The minimization process reduces the overdetermined system of equations formed by the data to a sensible system of $p$ equations in $p$ unknowns, where $p$ is the number of parameters in the functional part of the model. This new system of equations is then solved to obtain the parameter estimates. To learn more about how the method of least squares is used to estimate the parameters, see Section 4.4.3.1. Examples of Linear Functions As just mentioned above, linear models are not limited to being straight lines or planes, but include a fairly wide range of shapes (the general linear form being $f(\vec{x};\vec{\beta}) = \beta_0 + \beta_1 x_1 + \beta_2 x_2 + \ldots$). For example, a simple quadratic curve, $f(x;\vec{\beta}) = \beta_0 + \beta_1 x + \beta_{11} x^2$, is linear in the statistical sense. A straight-line model in $\log(x)$, $f(x;\vec{\beta}) = \beta_0 + \beta_1 \ln(x)$, or a polynomial in $\sin(x)$, $f(x;\vec{\beta}) = \beta_0 + \beta_1 \sin(x) + \beta_2 \sin(2x) + \beta_3 \sin(3x)$, is also linear in the statistical sense because they are linear in the parameters, though not with respect to the observed explanatory variable, $x$. Nonlinear Model Example Just as models that are linear in the statistical sense do not have to be linear with respect to the explanatory variables, nonlinear models can be linear with respect to the explanatory variables, but not with respect to the parameters. For example, $f(x;\vec{\beta}) = \beta_0 + \beta_0 \beta_1 x$ is linear in $x$, but it cannot be written in the general form of a linear model presented above. This is because the slope of this line is expressed as the product of two parameters. As a result, nonlinear least squares regression could be used to fit this model, but linear least squares cannot be used.
For further examples and discussion of nonlinear models see the next section, Section 4.1.4.2. Advantages of Linear Least Squares Linear least squares regression has earned its place as the primary tool for process modeling because of its effectiveness and completeness. Though there are types of data that are better described by functions that are nonlinear in the parameters, many processes in science and engineering are well-described by linear models. This is because either the processes are inherently linear or because, over short ranges, any process can be well-approximated by a linear model. The estimates of the unknown parameters obtained from linear least squares regression are the optimal estimates from a broad class of possible parameter estimates under the usual assumptions used for process modeling. Practically speaking, linear least squares regression makes very efficient use of the data. Good results can be obtained with relatively small data sets. Finally, the theory associated with linear regression is well-understood and allows for construction of different types of easily-interpretable statistical intervals for predictions, calibrations, and optimizations. These statistical intervals can then be used to give clear answers to scientific and engineering questions. Disadvantages of Linear Least Squares The main disadvantages of linear least squares are limitations in the shapes that linear models can assume over long ranges, possibly poor extrapolation properties, and sensitivity to outliers. Linear models with nonlinear terms in the predictor variables curve relatively slowly, so for inherently nonlinear processes it becomes increasingly difficult to find a linear model that fits the data well as the range of the data increases.
As the explanatory variables become extreme, the output of the linear model will also always be more extreme. This means that linear models may not be effective for extrapolating the results of a process for which data cannot be collected in the region of interest. Of course extrapolation is potentially dangerous regardless of the model type. Finally, while the method of least squares often gives optimal estimates of the unknown parameters, it is very sensitive to the presence of unusual data points in the data used to fit a model. One or two outliers can sometimes seriously skew the results of a least squares analysis. This makes model validation, especially with respect to outliers, critical to obtaining sound answers to the questions motivating the construction of the model. 4. Process Modeling 4.1. Introduction to Process Modeling 4.1.4. What are some of the different statistical methods for model building? 4.1.4.2. Nonlinear Least Squares Regression Extension of Linear Least Squares Regression Nonlinear least squares regression extends linear least squares regression for use with a much larger and more general class of functions. Almost any function that can be written in closed form can be incorporated in a nonlinear regression model. Unlike linear regression, there are very few limitations on the way parameters can be used in the functional part of a nonlinear regression model. The way in which the unknown parameters in the function are estimated, however, is conceptually the same as it is in linear least squares regression. Definition of a Nonlinear Regression Model As the name suggests, a nonlinear model is any model of the basic form $y = f(\vec{x};\vec{\beta}) + \varepsilon$, in which 1. the functional part of the model is not linear with respect to the unknown parameters, $\beta_0, \beta_1, \ldots$, and 2. the method of least squares is used to estimate the values of the unknown parameters.
Due to the way in which the unknown parameters of the function are usually estimated, however, it is often much easier to work with models that meet two additional criteria: 3. the function is smooth with respect to the unknown parameters, and 4. the least squares criterion that is used to obtain the parameter estimates has a unique solution. These last two criteria are not essential parts of the definition of a nonlinear least squares model, but are of practical importance. Examples of Nonlinear Models Some examples of nonlinear models include: $f(x;\vec{\beta}) = \dfrac{\beta_0 + \beta_1 x}{1 + \beta_2 x}$, $f(x;\vec{\beta}) = \beta_1 x^{\beta_2}$, $f(x;\vec{\beta}) = \beta_0 + \beta_1 \exp(-\beta_2 x)$, and $f(\vec{x};\vec{\beta}) = \beta_1 \sin(\beta_2 + \beta_3 x_1) + \beta_4 \cos(\beta_5 + \beta_6 x_2)$, each fit as $y = f(\vec{x};\vec{\beta}) + \varepsilon$ with unknown parameters $\beta_0, \beta_1, \ldots$. Advantages of Nonlinear Least Squares The biggest advantage of nonlinear least squares regression over many other techniques is the broad range of functions that can be fit. Although many scientific and engineering processes can be described well using linear models, or other relatively simple types of models, there are many other processes that are inherently nonlinear. For example, the strengthening of concrete as it cures is a nonlinear process. Research on concrete strength shows that the strength increases quickly at first and then levels off, or approaches an asymptote in mathematical terms, over time. Linear models do not describe processes that asymptote very well because for all linear functions the function value can't increase or decrease at a declining rate as the explanatory variables go to the extremes. There are many types of nonlinear models, on the other hand, that describe the asymptotic behavior of a process well. Like the asymptotic behavior of some processes, other features of physical processes can often be expressed more easily using nonlinear models than with simpler model types. Being a "least squares" procedure, nonlinear least squares has some of the same advantages (and disadvantages) that linear least squares regression has over other methods. One common advantage is efficient use of data.
Nonlinear regression can produce good estimates of the unknown parameters in the model with relatively small data sets. Another advantage that nonlinear least squares shares with linear least squares is a fairly well-developed theory for computing confidence, prediction and calibration intervals to answer scientific and engineering questions. In most cases the probabilistic interpretation of the intervals produced by nonlinear regression are only approximately correct, but these intervals still work very well in practice. Disadvantages of Nonlinear Least Squares The major cost of moving to nonlinear least squares regression from simpler modeling techniques like linear least squares is the need to use iterative optimization procedures to compute the parameter estimates. With functions that are linear in the parameters, the least squares estimates of the parameters can always be obtained analytically, while that is generally not the case with nonlinear models. The use of iterative procedures requires the user to provide starting values for the unknown parameters before the software can begin the optimization. The starting values must be reasonably close to the as yet unknown parameter estimates or the optimization procedure may not converge. Bad starting values can also cause the software to converge to a local minimum rather than the global minimum that defines the least squares estimates. Disadvantages shared with the linear least squares procedure include a strong sensitivity to outliers. Just as in a linear least squares analysis, the presence of one or two outliers in the data can seriously affect the results of a nonlinear analysis. In addition there are unfortunately fewer model validation tools for the detection of outliers in nonlinear regression than there are for linear regression.
Home Aniruddha Bhandari — Updated On October 27th, 2023 Beginner Data Engineering Python Regression Structured Data Technique Master the Art of Data Science: A Step-by-Step Guide to Becoming an Industry-Ready Data Scientist Download Roadmap × Feature engineering is a critical step in building accurate and effective machine learning models. One key aspect of feature engineering is scaling, normalization, and standardization, which involves transforming the data to make it more suitable for modeling. These techniques can help to improve model performance, reduce the impact of outliers, and ensure that the data is on the same scale. In this article, we will explore the concepts of scaling, normalization, and standardization, including why they are important and how to apply them to different types of data. By the end of this article, you’ll have a thorough understanding of these essential feature engineering techniques and be able to apply them to your own machine learning projects. Table of contents What is Feature Scaling? Why Should we Use Feature Scaling? What is Normalization? What is Standardization? The Big Question – Normalize or Standardize? Implementing Feature Scaling in Python Comparing Unscaled, Normalized, and Standardized Data Applying Scaling to Machine Learning Algorithms Frequently Asked Questions What is Feature Scaling? Feature scaling is a data preprocessing technique used to transform the values of features or variables in a dataset to a similar scale. The purpose is to ensure that all features contribute equally to the model and to avoid the domination of features with larger values. Feature scaling becomes necessary when dealing with datasets containing features that have different ranges, units of measurement, or orders of magnitude. In such cases, the variation in feature values can lead to biased model performance or difficulties during the learning process. 
There are several common techniques for feature scaling, including standardization, normalization, and min-max scaling. These methods adjust the feature values while preserving their relative relationships and distributions. By applying feature scaling, the dataset’s features can be transformed to a more consistent scale, making it easier to build accurate and effective machine learning models. Scaling facilitates meaningful comparisons between features, improves model convergence, and prevents certain features from overshadowing others based solely on their magnitude. Why Should we Use Feature Scaling? Some machine learning algorithms are sensitive to feature scaling, while others are virtually invariant. Let’s explore these in more depth: 1. Gradient Descent Based Algorithms Machine learning algorithms like linear regression, logistic regression, neural network, PCA (principal component analysis), etc., that use gradient descent as an optimization technique require data to be scaled. Take a look at the formula for gradient descent below: The presence of feature value X in the formula will affect the step size of the gradient descent. The difference in the ranges of features will cause different step sizes for each feature. To ensure that the gradient descent moves smoothly towards the minima and that the steps for gradient descent are updated at the same rate for all the features, we scale the data before feeding it to the model. 2. Distance-Based Algorithms Distance algorithms like KNN, K-means clustering, and SVM(support vector machines) are most affected by the range of features. This is because, behind the scenes, they are using distances between data points to determine their similarity. 
For example, let’s say we have data containing high school CGPA scores of students (ranging from 0 to 5) and their future incomes (in thousands Rupees): Since both the features have different scales, there is a chance that higher weightage is given to features with higher magnitudes. This will impact the performance of the machine learning algorithm; obviously, we do not want our algorithm to be biased towards one feature. “Having features on a similar scale can help the gradient descent converge more quickly towards the minima.” Therefore, we scale our data before employing a distance-based algorithm so that all the features contribute equally to the result. The effect of scaling is conspicuous when we compare the Euclidean distance between data points for students A and B, and between B and C, before and after scaling, as shown below: Distance AB before scaling => Distance BC before scaling => Distance AB after scaling => Distance BC after scaling => 3. Tree-Based Algorithms Tree-based algorithms, on the other hand, are fairly insensitive to the scale of the features. Think about it, a decision tree only splits a node based on a single feature. The decision tree splits a node on a feature that increases the homogeneity of the node. Other features do not influence this split on a feature. So, the remaining features have virtually no effect on the split. This is what makes them invariant to the scale of the features! What is Normalization? Normalization is a data preprocessing technique used to adjust the values of features in a dataset to a common scale. This is done to facilitate data analysis and modeling, and to reduce the impact of different scales on the accuracy of machine learning models. Normalization is a scaling technique in which values are shifted and rescaled so that they end up ranging between 0 and 1. It is also known as Min-Max scaling.
Here’s the formula for normalization: $X' = \dfrac{X - X_{min}}{X_{max} - X_{min}}$. Here, $X_{max}$ and $X_{min}$ are the maximum and the minimum values of the feature, respectively. When the value of X is the minimum value in the column, the numerator will be 0, and hence X’ is 0. On the other hand, when the value of X is the maximum value in the column, the numerator is equal to the denominator, and thus the value of X’ is 1. If the value of X is between the minimum and the maximum value, then the value of X’ is between 0 and 1. What is Standardization? Standardization is another scaling method where the values are centered around the mean with a unit standard deviation. This means that the mean of the attribute becomes zero, and the resultant distribution has a unit standard deviation. Here’s the formula for standardization: $X' = \dfrac{X - \mu}{\sigma}$. Here, $\mu$ is the mean of the feature values and $\sigma$ is the standard deviation of the feature values. Note that, in this case, the values are not restricted to a particular range. Now, the big question in your mind must be when should we use normalization and when should we use standardization? Let’s find out! The Big Question – Normalize or Standardize? Normalization Standardization Rescales values to a range between 0 and 1 Centers data around the mean and scales to a standard deviation of 1 Useful when the distribution of the data is unknown or not Gaussian Useful when the distribution of the data is Gaussian or unknown Sensitive to outliers Less sensitive to outliers Retains the shape of the original distribution Changes the shape of the original distribution May not preserve the relationships between the data points Preserves the relationships between the data points Equation: (x – min)/(max – min) Equation: (x – mean)/standard deviation However, at the end of the day, the choice of using normalization or standardization will depend on your problem and the machine learning algorithm you are using. There is no hard and fast rule to tell you when to normalize or standardize your data.
You can always start by fitting your model to raw, normalized, and standardized data and comparing the performance for the best results. It is a good practice to fit the scaler on the training data and then use it to transform the testing data. This would avoid any data leakage during the model testing process. Also, the scaling of target values is generally not required. Implementing Feature Scaling in Python Now comes the fun part – putting what we have learned into practice. I will be applying feature scaling to a few machine-learning algorithms on the Big Mart dataset. I’ve taken on the DataHack platform. I will skip the preprocessing steps since they are out of the scope of this tutorial. But you can find them neatly explained in this article. Those steps will enable you to reach the top 20 percentile on the hackathon leaderboard, so that’s worth checking out! view raw So, let’s first split our data into training and testing sets: Python Code: @ShilpiMazumdar1/feature_scaling A Nix repl by ShilpiMazumdar1 Open on Replit Show code 0 Run 42 SH Before moving to the feature scaling part, let’s glance at the details of our data using the pd.describe() method: We can see that there is a huge difference in the range of values present in our numerical features: Item_Visibility, Item_Weight, Item_MRP, and Outlet_Establishment_Year. Let’s try and fix that using feature scaling! Note: You will notice negative values in the Item_Visibility feature because I have taken log-transformation to deal with the skewness in the feature. Normalization Using sklearn (scikit-learn) To normalize your data, you need to import the MinMaxScaler from the sklearn library and apply it to our dataset. So, let’s do that! 
NormalizationVsStandarization_2.py hosted with ❤ by GitHub Let’s see how normalization has affected our dataset: 1 # data normalization with sklearn 2 from sklearn.preprocessing import MinMaxScaler 3 4 # fit scaler on training data 5 norm = MinMaxScaler().fit(X_train) 6 7 # transform training data 8 X_train_norm = norm.transform(X_train) 9 10 # transform testing dataabs 11 X_test_norm = norm.transform(X_test) view raw All the features now have a minimum value of 0 and a maximum value of 1. Perfect! Try out the above code in the live coding window below!! @LakshayArora1/FeatureScaling A Python repl by LakshayArora1 Open on Replit Show code 0 Run 22 LA Next, let’s try to standardize our data. Standardization Using sklearn To standardize your data, you need to import the StandardScaler from the sklearn library and apply it to our dataset. Here’s how you can do it: NormalizationVsStandarization_3.py hosted with ❤ by GitHub 1 # data standardization with sklearn 2 from sklearn.preprocessing import StandardScaler 3 4 # copy of datasets 5 X_train_stand = X_train.copy() 6 X_test_stand = X_test.copy() 7 8 # numerical features 9 num_cols = ['Item_Weight','Item_Visibility','Item_MRP','Outlet_Establishment_Year'] 10 11 # apply standardization on numerical features 12 for i in num_cols: 13 14 # fit on training data column 15 scale = StandardScaler().fit(X_train_stand[[i]]) 16 17 # transform the training data column 18 X_train_stand[i] = scale.transform(X_train_stand[[i]]) 19 20 # transform the testing data column 21 X_test_stand[i] = scale.transform(X_test_stand[[i]]) You would have noticed that I only applied standardization to my numerical columns, not the other One-Hot Encoded features. Standardizing the One-Hot encoded features would mean assigning a distribution to categorical features. You don’t want to do that! But why did I not do the same while normalizing the data? Because One-Hot encoded features are already in the range between 0 to 1. 
So, normalization would not affect their value. Right, let’s have a look at how standardization has transformed our data: The numerical features are now centered on the mean with a unit standard deviation. Awesome! Comparing Unscaled, Normalized, and Standardized Data It is always great to visualize your data to understand the distribution present. We can see the comparison between our unscaled and scaled data using boxplots. You can learn more about data visualization here. You can notice how scaling the features brings everything into perspective. The features are now more comparable and will have a similar effect on the learning models. Applying Scaling to Machine Learning Algorithms It’s now time to train some machine learning algorithms on our data to compare the effects of different scaling techniques on the algorithm’s performance. I want to see the effect of scaling on three algorithms in particular: K-Nearest Neighbors, Support Vector Regressor, and Decision Tree. K-Nearest Neighbors As we saw before, KNN is a distance-based algorithm that is affected by the range of features. Let’s see how it performs on our data before and after scaling: NormalizationVsStandarization_4.py hosted with ❤ by GitHub view raw You can see that scaling the features has brought down the RMSE score of our KNN model. Specifically, the normalized data performs a tad bit better than the standardized data. Note: I am measuring the RMSE here because this competition evaluates the RMSE. Support Vector Regressor SVR is another distance-based algorithm.
So let’s check out whether it works better with normalization or standardization: 1 # training a KNN model 2 from sklearn.neighbors import KNeighborsRegressor 3 # measuring RMSE score 4 from sklearn.metrics import mean_squared_error 5 6 # knn 7 knn = KNeighborsRegressor(n_neighbors=7) 8 9 rmse = [] 10 11 # raw, normalized and standardized training and testing data 12 trainX = [X_train, X_train_norm, X_train_stand] 13 testX = [X_test, X_test_norm, X_test_stand] 14 15 # model fitting and measuring RMSE 16 for i in range(len(trainX)): 17 18 # fit 19 knn.fit(trainX[i],y_train) 20 # predict 21 pred = knn.predict(testX[i]) 22 # RMSE 23 rmse.append(np.sqrt(mean_squared_error(y_test,pred))) 24 25 # visualizing the result 26 df_knn = pd.DataFrame({'RMSE':rmse},index=['Original','Normalized','Standardized']) 27 df_knn 1 # training an SVR model 2 from sklearn.svm import SVR 3 # measuring RMSE score 4 from sklearn.metrics import mean_squared_error 5 6 # SVR 7 svr = SVR(kernel='rbf',C=5) 8 9 rmse = [] 10 11 # raw, normalized and standardized training and testing data 12 trainX = [X_train, X_train_norm, X_train_stand] 13 testX = [X_test, X_test_norm, X_test_stand] 14 15 # model fitting and measuring RMSE 16 for i in range(len(trainX)): 17 18 # fit 19 svr.fit(trainX[i],y_train) 20 # predict view raw view raw NormalizationVsStandarization_5.py hosted with ❤ by GitHub We can see that scaling the features does bring down the RMSE score. And the standardized data has performed better than the normalized data. Why do you think that’s the case? The sklearn documentation states that SVM, with RBF kernel, assumes that all the features are centered around zero and variance is of the same order. This is because a feature with a variance greater than that of others prevents the estimator from learning from all the features. Great! Decision Tree We already know that a Decision tree is invariant to feature scaling. 
But I wanted to show a practical example of how it performs on the data: NormalizationVsStandarization_6.py hosted with ❤ by GitHub 21 pred = svr.predict(testX[i]) 22 # RMSE 23 rmse.append(np.sqrt(mean_squared_error(y_test,pred))) 24 25 # visualizing the result 26 df_svr = pd.DataFrame({'RMSE':rmse},index=['Original','Normalized','Standardized']) 27 df_svr 1 # training a Decision Tree model 2 from sklearn.tree import DecisionTreeRegressor 3 # measuring RMSE score 4 from sklearn.metrics import mean_squared_error 5 6 # Decision tree 7 dt = DecisionTreeRegressor(max_depth=10,random_state=27) 8 9 rmse = [] 10 11 # raw, normalized and standardized training and testing data 12 trainX = [X_train,X_train_norm,X_train_stand] 13 testX = [X_test,X_test_norm,X_test_stand] 14 15 # model fitting and measuring RMSE 16 for i in range(len(trainX)): 17 18 # fit 19 dt.fit(trainX[i],y_train) 20 # predict 21 pred = dt.predict(testX[i]) 22 # RMSE 23 rmse.append(np.sqrt(mean_squared_error(y_test,pred))) 24 25 # visualizing the result 26 df_dt = pd.DataFrame({'RMSE':rmse},index=['Original','Normalized','Standardized']) 27 df_dt You can see that the RMSE score has not moved an inch on scaling the features. So rest assured when you are using tree-based algorithms on your data! Build Effective Machine Learning Models This tutorial covered the relevance of using feature scaling on your data and how normalization and standardization have varying effects on the working of machine learning algorithms. Remember that there is no correct answer to when to use normalization over standardization and vice-versa. It all depends on your data and the algorithm you are using. To enhance your skills in feature engineering and other key data science techniques, consider enrolling in our Data Science Black Belt program. Our comprehensive curriculum covers all aspects of data science, including advanced topics such as feature engineering, machine learning, and deep learning. 
With hands-on projects and mentorship, you’ll gain practical experience and the skills you need to succeed in this exciting field. Enroll today and take your data science skills to the next level! Frequently Asked Questions Q1. How is Standardization different from Normalization feature scaling? A. Standardization centers data around a mean of zero and a standard deviation of one, while normalization scales data to a set range, often [0, 1], by using the minimum and maximum values. Q2. Why is Standardization used in machine learning? A. Standardization ensures algorithmic stability and prevents sensitivity to the scale of input features, improves optimization algorithms’ convergence and search efficiency, and enhances the performance of certain machine learning algorithms. Q3. Why is Normalization used in machine learning? A. Normalization helps in scaling the input features to a fixed range, typically [0, 1], to ensure that no single feature disproportionately impacts the results. It preserves the relationship between the minimum and maximum values of each feature, which can be important for some algorithms. It also improves the convergence and stability of some machine learning algorithms, particularly those that use gradient-based optimization. Q4. Why do we normalize values? A. We normalize values to bring them into a common scale, making it easier to compare and analyze data. Normalization also helps to reduce the impact of outliers and improve the accuracy and stability of statistical models. Q5. How do you normalize a set of values? A. To normalize a set of values, we first calculate the mean and standard deviation of the data. Then, we subtract the mean from each value and divide by the standard deviation to obtain standardized values with a mean of 0 and a standard deviation of 1.
Alternatively, we can use other normalization techniques such as min-max normalization, where we scale the values to a range of 0 to 1, or unit vector normalization, where we scale the values to have a length of 1. Feature scaling feature scaling machine learning feature scaling python live coding normalization vs. standardization normalization standardization “Feature engineering is the process of transforming raw data into features that better represent the underlying problem to the predictive models, resulting in improved model accuracy on unseen data.” — Dr. Jason Brownlee This gives us an idea about feature engineering being the process of transforming data into features to act as inputs for machine learning models such that good quality features help in improving the overall model performance. Features are also very much dependent on the underlying problem. Thus, even though the machine learning task might be same in different scenarios, like classification of emails into spam and non-spam or classifying handwritten digits, the features extracted in each scenario will be very different from the other. Prof. Pedro Domingos from the University of Washington, in his paper titled, “A Few Useful Things to Know about Machine Learning” tells us the following. “At the end of the day, some machine learning projects succeed and some fail. What makes the difference? Easily the most important factor is the features used.” — Prof. Pedro Domingos The final quote which should motivate you about feature engineering is from renowned Kaggler, Xavier Conort. Most of you already know that tough real-world machine learning problems are often posted on Kaggle regularly which is usually open to everyone. “The algorithms we used are very standard for Kagglers. …We spent most of our efforts in feature engineering.
… We were also very careful to discard features likely to expose us to the risk of over-fitting our model.” — Xavier Conort Understanding Features A feature is typically a specific representation on top of raw data, which is an individual, measurable attribute, typically depicted by a column in a dataset. Considering a generic two-dimensional dataset, each observation is depicted by a row and each feature by a column, which will have a specific value for an observation. A generic dataset snapshot Thus like in the example in the figure above, each row typically indicates a feature vector and the entire set of features across all the observations forms a two-dimensional feature matrix also known as a feature-set. This is akin to data frames or spreadsheets representing two-dimensional data. Typically machine learning algorithms work with these numeric matrices or tensors and hence most feature engineering techniques deal with converting raw data into some numeric representations which can be easily understood by these algorithms. Features can be of two major types based on the dataset. Inherent raw features are obtained directly from the dataset with no extra data manipulation or engineering. Derived features are usually obtained from feature engineering, where we extract features from existing data attributes. A simple example would be creating a new feature “Age” from an employee dataset containing “Birthdate” by just subtracting their birth date from the current date. There are diverse types and formats of data including structured and unstructured data. In this article, we will discuss various feature engineering strategies for dealing with structured continuous numeric data. All these examples are a part of one of my recent books ‘Practical Machine Learning with Python’ and you can access relevant datasets and code used in this article on GitHub. 
A big shout out also goes to Gabriel Moreira who helped me by providing some excellent pointers on feature engineering techniques. Feature Engineering on Numeric Data Numeric data typically represents data in the form of scalar values depicting observations, recordings or measurements. Here, by numeric data, we mean continuous data and not discrete data which is typically represented as categorical data. Numeric data can also be represented as a vector of values where each value or entity in the vector can represent a specific feature. Integers and floats are the most common and widely used numeric data types for continuous numeric data. Even though numeric data can be directly fed into machine learning models, you would still need to engineer features which are relevant to the scenario, problem and domain before building a model. Hence the need for feature engineering still remains. Let’s leverage python and look at some strategies for feature engineering on numeric data. We load up the following necessary dependencies first (typically in a Jupyter notebook). import pandas as pd import matplotlib.pyplot as plt import numpy as np import scipy.stats as spstats %matplotlib inline Raw Measures Like we mentioned earlier, raw numeric data can often be fed directly to machine learning models based on the context and data format. Raw measures are typically indicated using numeric variables directly as features without any form of transformation or engineering. Typically these features can indicate values or counts. Let’s load up one of our datasets, the Pokémon dataset also available on Kaggle. poke_df = pd.read_csv('datasets/Pokemon.csv', encoding='utf-8') poke_df.head() Snapshot of our Pokemon dataset Pokémon is a huge media franchise surrounding fictional characters called Pokémon which stands for pocket monsters. In short, you can think of them as fictional animals with superpowers! This dataset consists of these characters with various statistics for each character. 
Values If you closely observe the data frame snapshot in the above figure, you can see that several attributes represent numeric raw values which can be used directly. The following snippet depicts some of these features with more emphasis. poke_df[['HP', 'Attack', 'Defense']].head() Features with (continuous) numeric data Thus, you can directly use these attributes as features which are depicted in the above data frame. These include each Pokémon’s HP (Hit Points), Attack and Defense stats. In fact, we can also compute some basic statistical measures on these fields. poke_df[['HP', 'Attack', 'Defense']].describe() Basic descriptive statistics on numeric features With this you can get a good idea about statistical measures in these features like count, average, standard deviation and quartiles. Counts Another form of raw measures include features which represent frequencies, counts or occurrences of specific attributes. Let’s look at a sample of data from the millionsong dataset which depicts counts or frequencies of songs which have been heard by various users. popsong_df = pd.read_csv('datasets/song_views.csv', encoding='utf-8') popsong_df.head(10) Song listen counts as a numeric feature It is quite evident from the above snapshot that the listen_count field can be used directly as a frequency\count based numeric feature. Binarization Often raw frequencies or counts may not be relevant for building a model based on the problem which is being solved. For instance if I’m building a recommendation system for song recommendations, I would just want to know if a person is interested or has listened to a particular song. This doesn’t require the number of times a song has been listened to since I am more concerned about the various songs he\she has listened to. In this case, a binary feature is preferred as opposed to a count based feature. We can binarize our listen_count field as follows. 
watched = np.array(popsong_df['listen_count']) watched[watched >= 1] = 1 popsong_df['watched'] = watched You can also use scikit-learn's Binarizer class here from its preprocessing module to perform the same task instead of numpy arrays. from sklearn.preprocessing import Binarizer bn = Binarizer(threshold=0.9) pd_watched = bn.transform([popsong_df['listen_count']])[0] popsong_df['pd_watched'] = pd_watched popsong_df.head(11) Binarizing song counts You can clearly see from the above snapshot that both the methods have produced the same result. Thus we get a binarized feature indicating if the song was listened to or not by each user which can be then further used in a relevant model. Rounding Often when dealing with continuous numeric attributes like proportions or percentages, we may not need the raw values having a high amount of precision. Hence it often makes sense to round off these high precision percentages into numeric integers. These integers can then be directly used as raw values or even as categorical (discrete-class based) features. Let’s try applying this concept in a dummy dataset depicting store items and their popularity percentages. items_popularity = pd.read_csv('datasets/item_popularity.csv', encoding='utf-8') items_popularity['popularity_scale_10'] = np.array( np.round((items_popularity['pop_percent'] * 10)), dtype='int') items_popularity['popularity_scale_100'] = np.array( np.round((items_popularity['pop_percent'] * 100)), dtype='int') items_popularity Rounding popularity to different scales Based on the above outputs, you can guess that we tried two forms of rounding. The features depict the item popularities now both on a scale of 1–10 and on a scale of 1–100. You can use these values both as numerical or categorical features based on the scenario and problem. Interactions Supervised machine learning models usually try to model the output responses (discrete classes or continuous values) as a function of the input feature variables. 
For example, a simple linear regression equation can be depicted as where the input features are depicted by variables having weights or coefficients denoted by respectively and the goal is to predict the response y. In this case, this simple linear model depicts the relationship between the output and inputs, purely based on the individual, separate input features. However, often in several real-world scenarios, it makes sense to also try and capture the interactions between these feature variables as a part of the input feature set. A simple depiction of the extension of the above linear regression formulation with interaction features would be where the features represented by denote the interaction features. Let’s try engineering some interaction features on our Pokémon dataset now. atk_def = poke_df[['Attack', 'Defense']] atk_def.head() From the output data frame, we can see that we have two numeric (continuous) features, Attack and Defense . We will now build features up to the 2nd degree by leveraging scikit-learn . from sklearn.preprocessing import PolynomialFeatures pf = PolynomialFeatures(degree=2, interaction_only=False, include_bias=False) res = pf.fit_transform(atk_def) res Output ------ array([[ 49., 49., 2401., 2401., 2401.], [ 62., 63., 3844., 3906., 3969.], [ 82., 83., 6724., 6806., 6889.], ..., [ 110., 60., 12100., 6600., 3600.], [ 160., 60., 25600., 9600., 3600.], [ 110., 120., 12100., 13200., 14400.]]) The above feature matrix depicts a total of five features including the new interaction features. We can see the degree of each feature in the above matrix as follows. pd.DataFrame(pf.powers_, columns=['Attack_degree', 'Defense_degree']) Looking at this output, we now know what each feature actually represents from the degrees depicted here. Armed with this knowledge, we can assign a name to each feature now as follows. This is just for ease of understanding and you should name your features with better, easy to access and simple names. 
intr_features = pd.DataFrame(res, columns=['Attack', 'Defense', 'Attack^2', 'Attack x Defense', 'Defense^2']) intr_features.head(5) Numeric features with their interactions Thus the above data frame represents our original features along with their interaction features. Binning The problem of working with raw, continuous numeric features is that often the distribution of values in these features will be skewed. This signifies that some values will occur quite frequently while some will be quite rare. Besides this, there is also another problem of the varying range of values in any of these features. For instance view counts of specific music videos could be abnormally large (Despacito we’re looking at you!) and some could be really small. Directly using these features can cause a lot of issues and adversely affect the model. Hence there are strategies to deal with this, which include binning and transformations. Binning, also known as quantization is used for transforming continuous numeric features into discrete ones (categories). These discrete values or numbers can be thought of as categories or bins into which the raw, continuous numeric values are binned or grouped into. Each bin represents a specific degree of intensity and hence a specific range of continuous numeric values fall into it. Specific strategies of binning data include fixed-width and adaptive binning. Let’s use a subset of data from a dataset extracted from the 2016 FreeCodeCamp Developer\Coder survey which talks about various attributes pertaining to coders and software developers. fcc_survey_df = pd.read_csv('datasets/fcc_2016_coder_survey_subset.csv', encoding='utf-8') fcc_survey_df[['ID.x', 'EmploymentField', 'Age', 'Income']].head() Sample attributes from the FCC coder survey dataset The ID.x variable is basically a unique identifier for each coder\developer who took the survey and the other fields are pretty self-explanatory. 
Fixed-Width Binning Just like the name indicates, in fixed-width binning, we have specific fixed widths for each of the bins which are usually pre-defined by the user analyzing the data. Each bin has a pre-fixed range of values which should be assigned to that bin on the basis of some domain knowledge, rules or constraints. Binning based on rounding is one of the ways, where you can use the rounding operation which we discussed earlier to bin raw values. Let’s now consider the Age feature from the coder survey dataset and look at its distribution. fig, ax = plt.subplots() fcc_survey_df['Age'].hist(color='#A9C5D3', edgecolor='black', grid=False) ax.set_title('Developer Age Histogram', fontsize=12) ax.set_xlabel('Age', fontsize=12) ax.set_ylabel('Frequency', fontsize=12) Histogram depicting developer age distribution The above histogram depicting developer ages is slightly right skewed as expected (lesser aged developers). We will now assign these raw age values into specific bins based on the following scheme Age Range: Bin --------------- 0 - 9 : 0 10 - 19 : 1 20 - 29 : 2 30 - 39 : 3 40 - 49 : 4 50 - 59 : 5 60 - 69 : 6 ... and so on We can easily do this using what we learnt in the Rounding section earlier where we round off these raw age values by taking the floor value after dividing it by 10. fcc_survey_df['Age_bin_round'] = np.array(np.floor( np.array(fcc_survey_df['Age']) / 10.)) fcc_survey_df[['ID.x', 'Age', 'Age_bin_round']].iloc[1071:1076] Binning by rounding You can see the corresponding bins for each age have been assigned based on rounding. But what if we need more flexibility? What if we want to decide and fix the bin widths based on our own rules\logic? Binning based on custom ranges will help us achieve this. Let’s define some custom age ranges for binning developer ages using the following scheme. 
Age Range : Bin --------------- 0 - 15 : 1 16 - 30 : 2 31 - 45 : 3 46 - 60 : 4 61 - 75 : 5 75 - 100 : 6 Based on this custom binning scheme, we will now label the bins for each developer age value and we will store both the bin range as well as the corresponding label. bin_ranges = [0, 15, 30, 45, 60, 75, 100] bin_names = [1, 2, 3, 4, 5, 6] fcc_survey_df['Age_bin_custom_range'] = pd.cut( np.array( fcc_survey_df['Age']), bins=bin_ranges) fcc_survey_df['Age_bin_custom_label'] = pd.cut( np.array( fcc_survey_df['Age']), bins=bin_ranges, labels=bin_names) # view the binned features fcc_survey_df[['ID.x', 'Age', 'Age_bin_round', 'Age_bin_custom_range', 'Age_bin_custom_label']].iloc[1071:1076] Custom binning scheme for developer ages Adaptive Binning The drawback in using fixed-width binning is that due to us manually deciding the bin ranges, we can end up with irregular bins which are not uniform based on the number of data points or values which fall in each bin. Some of the bins might be densely populated and some of them might be sparsely populated or even empty! Adaptive binning is a safer strategy in these scenarios where we let the data speak for itself! That’s right, we use the data distribution itself to decide our bin ranges. Quantile based binning is a good strategy to use for adaptive binning. Quantiles are specific values or cut-points which help in partitioning the continuous valued distribution of a specific numeric field into discrete contiguous bins or intervals. Thus, q-Quantiles help in partitioning a numeric attribute into q equal partitions. Popular examples of quantiles include the 2-Quantile known as the median which divides the data distribution into two equal bins, 4-Quantiles known as the quartiles which divide the data into 4 equal bins and 10-Quantiles also known as the deciles which create 10 equal width bins. Let’s now look at the data distribution for the developer Income field. 
fig, ax = plt.subplots() fcc_survey_df['Income'].hist(bins=30, color='#A9C5D3', edgecolor='black', grid=False) ax.set_title('Developer Income Histogram', fontsize=12) ax.set_xlabel('Developer Income', fontsize=12) ax.set_ylabel('Frequency', fontsize=12) Histogram depicting developer income distribution The above distribution depicts a right skew in the income with lesser developers earning more money and vice versa. Let’s take a 4-Quantile or a quartile based adaptive binning scheme. We can obtain the quartiles easily as follows. quantile_list = [0, .25, .5, .75, 1.] quantiles = fcc_survey_df['Income'].quantile(quantile_list) quantiles Output ------ 0.00 6000.0 0.25 20000.0 0.50 37000.0 0.75 60000.0 1.00 200000.0 Name: Income, dtype: float64 Let’s now visualize these quantiles in the original distribution histogram! fig, ax = plt.subplots() fcc_survey_df['Income'].hist(bins=30, color='#A9C5D3', edgecolor='black', grid=False) for quantile in quantiles: qvl = plt.axvline(quantile, color='r') ax.legend([qvl], ['Quantiles'], fontsize=10) ax.set_title('Developer Income Histogram with Quantiles', fontsize=12) ax.set_xlabel('Developer Income', fontsize=12) ax.set_ylabel('Frequency', fontsize=12) Histogram depicting developer income distribution with quartile values The red lines in the distribution above depict the quartile values and our potential bins. Let’s now leverage this knowledge to build our quartile based binning scheme. quantile_labels = ['0-25Q', '25-50Q', '50-75Q', '75-100Q'] fcc_survey_df['Income_quantile_range'] = pd.qcut( fcc_survey_df['Income'], q=quantile_list) fcc_survey_df['Income_quantile_label'] = pd.qcut( fcc_survey_df['Income'], q=quantile_list, labels=quantile_labels) fcc_survey_df[['ID.x', 'Age', 'Income', 'Income_quantile_range', 'Income_quantile_label']].iloc[4:9] Quantile based bin ranges and labels for developer incomes This should give you a good idea of how quantile based adaptive binning works. 
An important point to remember here is that the resultant outcome of binning leads to discrete valued categorical features and you might need an additional step of feature engineering on the categorical data before using it in any model. We will cover feature engineering strategies for categorical data shortly in the next part! Statistical Transformations We talked about the adverse effects of skewed data distributions briefly earlier. Let’s look at a different strategy of feature engineering now by making use of statistical or mathematical transformations.We will look at the Log transform as well as the Box-Cox transform. Both of these transform functions belong to the Power Transform family of functions, typically used to create monotonic data transformations. Their main significance is that they help in stabilizing variance, adhering closely to the normal distribution and making the data independent of the mean based on its distribution Log Transform The log transform belongs to the power transform family of functions. This function can be mathematically represented as which reads as log of x to the base b is equal to y. This can then be translated into which indicates as to what power must the base b be raised to in order to get x. The natural logarithm uses b=e where e = 2.71828 popularly known as Euler’s number. You can also use base b=10 used popularly in the decimal system. Log transforms are useful when applied to skewed distributions as they tend to expand the values which fall in the range of lower magnitudes and tend to compress or reduce the values which fall in the range of higher magnitudes. This tends to make the skewed distribution as normal-like as possible. Let’s use log transform on our developer Income feature which we used earlier. 
fcc_survey_df['Income_log'] = np.log((1+ fcc_survey_df['Income'])) fcc_survey_df[['ID.x', 'Age', 'Income', 'Income_log']].iloc[4:9] Log transform on developer income The Income_log field depicts the transformed feature after log transformation. Let’s look at the data distribution on this transformed field now. income_log_mean = np.round(np.mean(fcc_survey_df['Income_log']), 2) fig, ax = plt.subplots() fcc_survey_df['Income_log'].hist(bins=30, color='#A9C5D3', edgecolor='black', grid=False) plt.axvline(income_log_mean, color='r') ax.set_title('Developer Income Histogram after Log Transform', fontsize=12) ax.set_xlabel('Developer Income (log scale)', fontsize=12) ax.set_ylabel('Frequency', fontsize=12) ax.text(11.5, 450, r'$\mu$='+str(income_log_mean), fontsize=10) Histogram depicting developer income distribution after log transform Based on the above plot, we can clearly see that the distribution is more normal-like or gaussian as compared to the skewed distribution on the original data. Box-Cox Transform The Box-Cox transform is another popular function belonging to the power transform family of functions. This function has a pre-requisite that the numeric values to be transformed must be positive (similar to what log transform expects). In case they are negative, shifting using a constant value helps. Mathematically, the Box-Cox transform function can be denoted as follows. Such that the resulted transformed output y is a function of input x and the transformation parameter λ such that when λ = 0, the resultant transform is the natural log transform which we discussed earlier. The optimal value of λ is usually determined using a maximum likelihood or loglikelihood estimation. Let’s now apply the Box-Cox transform on our developer income feature. First we get the optimal lambda value from the data distribution by removing the non-null values as follows. 
income = np.array(fcc_survey_df['Income']) income_clean = income[~np.isnan(income)] l, opt_lambda = spstats.boxcox(income_clean) print('Optimal lambda value:', opt_lambda) Output ------ Optimal lambda value: 0.117991239456 Now that we have obtained the optimal λ value, let us use the Box-Cox transform for two values of λ such that λ = 0 and λ = λ(optimal) and transform the developer Income feature. fcc_survey_df['Income_boxcox_lambda_0'] = spstats.boxcox( (1+fcc_survey_df['Income']), lmbda=0) fcc_survey_df['Income_boxcox_lambda_opt'] = spstats.boxcox( fcc_survey_df['Income'], lmbda=opt_lambda) fcc_survey_df[['ID.x', 'Age', 'Income', 'Income_log', 'Income_boxcox_lambda_0', 'Income_boxcox_lambda_opt']].iloc[4:9] Developer income distribution after Box-Cox transform The transformed features are depicted in the above data frame. Just like we expected, Income_log and Income_boxcox_lambda_0 have the same values. Let’s look at the distribution of the transformed Income feature after transforming with the optimal λ. income_boxcox_mean = np.round( np.mean( fcc_survey_df['Income_boxcox_lambda_opt']),2) fig, ax = plt.subplots() fcc_survey_df['Income_boxcox_lambda_opt'].hist(bins=30, color='#A9C5D3', edgecolor='black', grid=False) plt.axvline(income_boxcox_mean, color='r') ax.set_title('Developer Income Histogram after Box–Cox Transform', fontsize=12) ax.set_xlabel('Developer Income (Box–Cox transform)', fontsize=12) ax.set_ylabel('Frequency', fontsize=12) ax.text(24, 450, r'$\mu$='+str(income_boxcox_mean), fontsize=10) Histogram depicting developer income distribution after Box-Cox transform The distribution looks more normal-like similar to what we obtained after the log transform. Conclusion Feature engineering is a very important aspect of machine learning and data science and should never be ignored. 
While we have automated feature engineering methodologies like deep learning as well as automated machine learning frameworks like AutoML (which still stresses that it requires good features to work well!), feature engineering is here to stay and even some of these automated methodologies often require specific engineered features based on the data type, domain and the problem to be solved. We looked at popular strategies for feature engineering on continuous numeric data in this article. In the next part, we will look at popular strategies for dealing with discrete, categorical data and then move on to unstructured data types in future articles. Stay tuned! All the code and datasets used in this article can be accessed from my GitHub The code is also available as a Jupyter notebook Follow Machine Learning Data Science Python Feature Engineering Tds Feature Engineering UNDERSTANDING FEATURE ENGINEERING (PART 2) Categorical Data Strategies for working with discrete, categorical data Dipanjan (DJ) Sarkar · Follow Published in Towards Data Science 14 min read · Jan 6, 2018 Listen Share More Source: https://pixabay.com Introduction We covered various feature engineering strategies for dealing with structured continuous numeric data in the previous article in this series. In this article, we will look at another type of structured data, which is discrete in nature and is popularly termed as categorical data. Dealing with numeric data is often easier than categorical data given that we do not have to deal with additional complexities of the semantics pertaining to each category value in any data attribute which is of a categorical type. We will use a hands-on approach to discuss several encoding Get unlimited access to the best of Medium for less than $1/week. Become a member schemes for dealing with categorical data and also a couple of popular techniques for dealing with large scale feature explosion, often known as the “curse of dimensionality”. 
Motivation I’m sure by now you must realize the motivation and the importance of feature engineering, we do stress on the same in detail in ‘Part 1’ of this series. Do check it out for a quick refresher if necessary. In short, machine learning algorithms cannot work directly with categorical data and you do need to do some amount of engineering and transformations on this data before you can start modeling on your data. Understanding Categorical Data Let’s get an idea about categorical data representations before diving into feature engineering strategies. Typically, any data attribute which is categorical in nature represents discrete values which belong to a specific finite set of categories or classes. These are also often known as classes or labels in the context of attributes or variables which are to be predicted by a model (popularly known as response variables). These discrete values can be text or numeric in nature (or even unstructured data like images!). There are two major classes of categorical data, nominal and ordinal. In any nominal categorical data attribute, there is no concept of ordering amongst the values of that attribute. Consider a simple example of weather categories, as depicted in the following figure. We can see that we have six major classes or categories in this particular scenario without any concept or notion of order (windy doesn’t always occur before sunny nor is it smaller or bigger than sunny). Weather as a categorical attribute Similarly movie, music and video game genres, country names, food and cuisine types are other examples of nominal categorical attributes. Ordinal categorical attributes have some sense or notion of order amongst its values. For instance look at the following figure for shirt sizes. It is quite evident that order or in this case ‘size’ matters when thinking about shirts (S is smaller than M which is smaller than L and so on). 
Shirt size as an ordinal categorical attribute Shoe sizes, education level and employment roles are some other examples of ordinal categorical attributes. Having a decent idea about categorical data, let’s now look at some feature engineering strategies. Feature Engineering on Categorical Data While a lot of advancements have been made in various machine learning frameworks to accept complex categorical data types like text labels. Typically any standard workflow in feature engineering involves some form of transformation of these categorical values into numeric labels and then applying some encoding scheme on these values. We load up the necessary essentials before getting started. import pandas as pd import numpy as np Transforming Nominal Attributes Nominal attributes consist of discrete categorical values with no notion or sense of order amongst them. The idea here is to transform these attributes into a more representative numerical format which can be easily understood by downstream code and pipelines. Let’s look at a new dataset pertaining to video game sales. This dataset is also available on Kaggle as well as in my GitHub repository. vg_df = pd.read_csv('datasets/vgsales.csv', encoding='utf-8') vg_df[['Name', 'Platform', 'Year', 'Genre', 'Publisher']].iloc[1:7] Dataset for video game sales Let’s focus on the video game Genre attribute as depicted in the above data frame. It is quite evident that this is a nominal categorical attribute just like Publisher and Platform . We can easily get the list of unique video game genres as follows. genres = np.unique(vg_df['Genre']) genres Output ------ array(['Action', 'Adventure', 'Fighting', 'Misc', 'Platform', 'Puzzle', 'Racing', 'Role-Playing', 'Shooter', 'Simulation', 'Sports', 'Strategy'], dtype=object) This tells us that we have 12 distinct video game genres. We can now generate a label encoding scheme for mapping each category to a numeric value by leveraging scikit-learn . 
from sklearn.preprocessing import LabelEncoder gle = LabelEncoder() genre_labels = gle.fit_transform(vg_df['Genre']) genre_mappings = {index: label for index, label in enumerate(gle.classes_)} genre_mappings Output ------ {0: 'Action', 1: 'Adventure', 2: 'Fighting', 3: 'Misc', 4: 'Platform', 5: 'Puzzle', 6: 'Racing', 7: 'Role-Playing', 8: 'Shooter', 9: 'Simulation', 10: 'Sports', 11: 'Strategy'} Thus a mapping scheme has been generated where each genre value is mapped to a number with the help of the LabelEncoder object gle . The transformed labels are stored in the genre_labels value which we can write back to our data frame. vg_df['GenreLabel'] = genre_labels vg_df[['Name', 'Platform', 'Year', 'Genre', 'GenreLabel']].iloc[1:7] Video game genres with their encoded labels These labels can be used directly often especially with frameworks like scikitlearn if you plan to use them as response variables for prediction, however as discussed earlier, we will need an additional step of encoding on these before we can use them as features. Transforming Ordinal Attributes Ordinal attributes are categorical attributes with a sense of order amongst the values. Let’s consider our Pokémon dataset which we used in Part 1 of this series. Let’s focus more specifically on the Generation attribute. poke_df = pd.read_csv('datasets/Pokemon.csv', encoding='utf-8') poke_df = poke_df.sample(random_state=1, frac=1).reset_index(drop=True) np.unique(poke_df['Generation']) Output ------ array(['Gen 1', 'Gen 2', 'Gen 3', 'Gen 4', 'Gen 5', 'Gen 6'], dtype=object) Based on the above output, we can see there are a total of 6 generations and each Pokémon typically belongs to a specific generation based on the video games (when they were released) and also the television series follows a similar timeline. 
This attribute is typically ordinal (domain knowledge is necessary here) because most Pokémon belonging to Generation 1 were introduced earlier in the video games and the television shows than Generation 2 as so on. Fans can check out the following figure to remember some of the popular Pokémon of each generation (views may differ among fans!). Popular Pokémon based on generation and type (source: https://www.reddit.com/r/pokemon/comments/2s2upx/heres_my_favorite_pokemon_by_type_and_gen_chart) Hence they have a sense of order amongst them. In general, there is no generic module or function to map and transform these features into numeric representations based on order automatically. Hence we can use a custom encoding\mapping scheme. gen_ord_map = {'Gen 1': 1, 'Gen 2': 2, 'Gen 3': 3, 'Gen 4': 4, 'Gen 5': 5, 'Gen 6': 6} poke_df['GenerationLabel'] = poke_df['Generation'].map(gen_ord_map) poke_df[['Name', 'Generation', 'GenerationLabel']].iloc[4:10] Pokémon generation encoding It is quite evident from the above code that the map(…) function from pandas is quite helpful in transforming this ordinal feature. Encoding Categorical Attributes If you remember what we mentioned earlier, typically feature engineering on categorical data involves a transformation process which we depicted in the previous section and a compulsory encoding process where we apply specific encoding schemes to create dummy variables or features for each category\value in a specific categorical attribute. You might be wondering, we just converted categories to numerical labels in the previous section, why on earth do we need this now? The reason is quite simple. 
Considering video game genres, if we directly fed the GenreLabel attribute as a feature in a machine learning model, it would consider it to be a continuous numeric feature thinking value 10 (Sports) is greater than 6 (Racing) but that is meaningless because the Sports genre is certainly not bigger or smaller than Racing, these are essentially different values or categories which cannot be compared directly. Hence we need an additional layer of encoding schemes where dummy features are created for each unique value or category out of all the distinct categories per attribute. One-hot Encoding Scheme Considering we have the numeric representation of any categorical attribute with m labels (after transformation), the one-hot encoding scheme, encodes or transforms the attribute into m binary features which can only contain a value of 1 or 0. Each observation in the categorical feature is thus converted into a vector of size m with only one of the values as 1 (indicating it as active). Let’s take a subset of our Pokémon dataset depicting two attributes of interest. poke_df[['Name', 'Generation', 'Legendary']].iloc[4:10] Subset of our Pokémon dataset The attributes of interest are Pokémon Generation and their Legendary status. The first step is to transform these attributes into numeric representations based on what we learnt earlier. from sklearn.preprocessing import OneHotEncoder, LabelEncoder # transform and map pokemon generations gen_le = LabelEncoder() gen_labels = gen_le.fit_transform(poke_df['Generation']) poke_df['Gen_Label'] = gen_labels # transform and map pokemon legendary status leg_le = LabelEncoder() leg_labels = leg_le.fit_transform(poke_df['Legendary']) poke_df['Lgnd_Label'] = leg_labels poke_df_sub = poke_df[['Name', 'Generation', 'Gen_Label', 'Legendary', 'Lgnd_Label']] poke_df_sub.iloc[4:10] Attributes with transformed (numeric) labels The features Gen_Label and Lgnd_Label now depict the numeric representations of our categorical features. 
Let’s now apply the one-hot encoding scheme on these features. # encode generation labels using one-hot encoding scheme gen_ohe = OneHotEncoder() gen_feature_arr = gen_ohe.fit_transform( poke_df[['Gen_Label']]).toarray() gen_feature_labels = list(gen_le.classes_) gen_features = pd.DataFrame(gen_feature_arr, columns=gen_feature_labels) # encode legendary status labels using one-hot encoding scheme leg_ohe = OneHotEncoder() leg_feature_arr = leg_ohe.fit_transform( poke_df[['Lgnd_Label']]).toarray() leg_feature_labels = ['Legendary_'+str(cls_label) for cls_label in leg_le.classes_] leg_features = pd.DataFrame(leg_feature_arr, columns=leg_feature_labels) In general, you can always encode both the features together using the fit_transform(…) function by passing it a two dimensional array of the two features together (Check out the documentation!). But we encode each feature separately, to make things easier to understand. Besides this, we can also create separate data frames and label them accordingly. Let’s now concatenate these feature frames and see the final result. poke_df_ohe = pd.concat([poke_df_sub, gen_features, leg_features], axis=1) columns = sum([['Name', 'Generation', 'Gen_Label'], gen_feature_labels, ['Legendary', 'Lgnd_Label'], leg_feature_labels], []) poke_df_ohe[columns].iloc[4:10] One-hot encoded features for Pokémon generation and legendary status Thus you can see that 6 dummy variables or binary features have been created for Generation and 2 for Legendary since those are the total number of distinct categories in each of these attributes respectively. Active state of a category is indicated by the 1 value in one of these dummy variables which is quite evident from the above data frame. Consider you built this encoding scheme on your training data and built some model and now you have some new data which has to be engineered for features before predictions as follows. 
new_poke_df = pd.DataFrame([['PikaZoom', 'Gen 3', True], ['CharMyToast', 'Gen 4', False]], columns=['Name', 'Generation', 'Legendary']) new_poke_df Sample new data You can leverage scikit-learn’s excellent API here by calling the transform(…) function of the previously built LabelEncoder and OneHotEncoder objects on the new data. Remember our workflow, first we do the transformation. new_gen_labels = gen_le.transform(new_poke_df['Generation']) new_poke_df['Gen_Label'] = new_gen_labels new_leg_labels = leg_le.transform(new_poke_df['Legendary']) new_poke_df['Lgnd_Label'] = new_leg_labels new_poke_df[['Name', 'Generation', 'Gen_Label', 'Legendary', 'Lgnd_Label']] Categorical attributes after transformation Once we have numerical labels, let’s apply the encoding scheme now! new_gen_feature_arr = gen_ohe.transform(new_poke_df[['Gen_Label']]).toarray() new_gen_features = pd.DataFrame(new_gen_feature_arr, columns=gen_feature_labels) new_leg_feature_arr = leg_ohe.transform(new_poke_df[['Lgnd_Label']]).toarray() new_leg_features = pd.DataFrame(new_leg_feature_arr, columns=leg_feature_labels) new_poke_ohe = pd.concat([new_poke_df, new_gen_features, new_leg_features], axis=1) columns = sum([['Name', 'Generation', 'Gen_Label'], gen_feature_labels, ['Legendary', 'Lgnd_Label'], leg_feature_labels], []) new_poke_ohe[columns] Categorical attributes after one-hot encoding Thus you can see it’s quite easy to apply this scheme on new data easily by leveraging scikit-learn’s powerful API. You can also apply the one-hot encoding scheme easily by leveraging the get_dummies(…) function from pandas . gen_onehot_features = pd.get_dummies(poke_df['Generation']) pd.concat([poke_df[['Name', 'Generation']], gen_onehot_features], axis=1).iloc[4:10] One-hot encoded features by leveraging pandas The above data frame depicts the one-hot encoding scheme applied on the Generation attribute and the results are same as compared to the earlier results as expected. 
Dummy Coding Scheme The dummy coding scheme is similar to the one-hot encoding scheme, except in the case of dummy coding scheme, when applied on a categorical feature with m distinct labels, we get m - 1 binary features. Thus each value of the categorical variable gets converted into a vector of size m - 1. The extra feature is completely disregarded and thus if the category values range from {0, 1, …, m-1} the 0th or the m - 1th feature column is dropped and corresponding category values are usually represented by a vector of all zeros (0). Let’s try applying dummy coding scheme on Pokémon Generation by dropping the first level binary encoded feature ( Gen 1 ). gen_dummy_features = pd.get_dummies(poke_df['Generation'], drop_first=True) pd.concat([poke_df[['Name', 'Generation']], gen_dummy_features], axis=1).iloc[4:10] Dummy coded features for Pokémon g eneration If you want, you can also choose to drop the last level binary encoded feature ( Gen 6 ) as follows. gen_onehot_features = pd.get_dummies(poke_df['Generation']) gen_dummy_features = gen_onehot_features.iloc[:,:-1] pd.concat([poke_df[['Name', 'Generation']], gen_dummy_features], axis=1).iloc[4:10] Dummy coded features for Pokémon g eneration Open in app Search Based on the above depictions, it is quite clear that categories belonging to the dropped feature are represented as a vector of zeros (0) like we discussed earlier. Effect Coding Scheme The effect coding scheme is actually very similar to the dummy coding scheme, except during the encoding process, the encoded features or feature vector, for the category values which represent all 0 in the dummy coding scheme, is replaced by -1 in the effect coding scheme. This will become clearer with the following example. gen_onehot_features = pd.get_dummies(poke_df['Generation']) gen_effect_features = gen_onehot_features.iloc[:,:-1] gen_effect_features.loc[np.all(gen_effect_features == 0, axis=1)] = -1. 
pd.concat([poke_df[['Name', 'Generation']], gen_effect_features], axis=1).iloc[4:10] Effect coded features for Pokémon generation The above output clearly shows that the Pokémon belonging to Generation 6 are now represented by a vector of -1 values as compared to zeros in dummy coding. Bin-counting Scheme The encoding schemes we discussed so far, work quite well on categorical data in general, but they start causing problems when the number of distinct categories in any feature becomes very large. Essentially, for any categorical feature of m distinct labels, you get m separate features. This can easily increase the size of the feature set causing problems like storage issues, model training problems with regard to time, space and memory. Besides this, we also have to deal with what is popularly known as the ‘curse of dimensionality’ where basically with an enormous number of features and not enough representative samples, model performance starts getting affected often leading to overfitting. Hence we need to look towards other categorical data feature engineering schemes for features having a large number of possible categories (like IP addresses). The bin-counting scheme is a useful scheme for dealing with categorical variables having many categories. In this scheme, instead of using the actual label values for encoding, we use probability based statistical information about the value and the actual target or response value which we aim to predict in our modeling efforts. A simple example would be based on past historical data for IP addresses and the ones which were used in DDOS attacks; we can build probability values for a DDOS attack being caused by any of the IP addresses. Using this information, we can encode an input feature which depicts that if the same IP address comes in the future, what is the probability value of a DDOS attack being caused. This scheme needs historical data as a pre-requisite and is an elaborate one. 
Depicting this with a complete example would be currently difficult here but there are several resources online which you can refer to for the same. Feature Hashing Scheme The feature hashing scheme is another useful feature engineering scheme for dealing with large scale categorical features. In this scheme, a hash function is typically used with the number of encoded features pre-set (as a vector of predefined length) such that the hashed values of the features are used as indices in this pre-defined vector and values are updated accordingly. Since a hash function maps a large number of values into a small finite set of values, multiple different values might create the same hash which is termed as collisions. Typically, a signed hash function is used so that the sign of the value obtained from the hash is used as the sign of the value which is stored in the final feature vector at the appropriate index. This should ensure lesser collisions and lesser accumulation of error due to collisions. Hashing schemes work on strings, numbers and other structures like vectors. You can think of hashed outputs as a finite set of b bins such that when hash function is applied on the same values\categories, they get assigned to the same bin (or subset of bins) out of the b bins based on the hash value. We can pre-define the value of b which becomes the final size of the encoded feature vector for each categorical attribute that we encode using the feature hashing scheme. Thus even if we have over 1000 distinct categories in a feature and we set b=10 as the final feature vector size, the output feature set will still have only 10 features as compared to 1000 binary features if we used a one-hot encoding scheme. Let’s consider the Genre attribute in our video game dataset. 
unique_genres = np.unique(vg_df[['Genre']]) print("Total game genres:", len(unique_genres)) print(unique_genres) Output ------ Total game genres: 12 ['Action' 'Adventure' 'Fighting' 'Misc' 'Platform' 'Puzzle' 'Racing' 'Role-Playing' 'Shooter' 'Simulation' 'Sports' 'Strategy'] We can see that there are a total of 12 genres of video games. If we used a one-hot encoding scheme on the Genre feature, we would end up having 12 binary features. Instead, we will now use a feature hashing scheme by leveraging scikit-learn’s FeatureHasher class, which uses a signed 32-bit version of the Murmurhash3 hash function. We will pre-define the final feature vector size to be 6 in this case. from sklearn.feature_extraction import FeatureHasher fh = FeatureHasher(n_features=6, input_type='string') hashed_features = fh.fit_transform(vg_df['Genre']) hashed_features = hashed_features.toarray() pd.concat([vg_df[['Name', 'Genre']], pd.DataFrame(hashed_features)], axis=1).iloc[1:7] Feature Hashing on the Genre attribute Based on the above output, the Genre categorical attribute has been encoded using the hashing scheme into 6 features instead of 12. We can also see that rows 1 and 6 denote the same genre of games, Platform which have been rightly encoded into the same feature vector. Conclusion These examples should give you a good idea about popular strategies for feature engineering on discrete, categorical data. If you read Part 1 of this series, you would have seen that it is slightly challenging to work with categorical data as compared to continuous, numeric data but definitely interesting! We also talked about some ways to handle large feature spaces using feature engineering but you should also remember that there are other techniques including feature selection and dimensionality reduction methods to handle large feature spaces. We will cover some of these methods in a later article. Next up will be feature engineering strategies for unstructured text data. Stay tuned! 
To read about feature engineering strategies for continuous numeric data, check out Part 1 of this series! All the code and datasets used in this article can be accessed from my GitHub The code is also available as a Jupyter notebook Machine Learning Data Science Feature Engineering Programming Tds Feature Engineering In this article, we will look at various feature engineering techniques for extracting useful information using the datetime column. And if you’re new to time series, I encourage you to check out the below free course: Creating Time Series Forecast using Python Table of contents Overview Introduction Quick Introduction to Time Series Setting up the Problem Statement for Time Series Data Feature Engineering for Time Series #1: Date-Related Features Feature Engineering for Time Series #2: Time-Based Features Feature Engineering for Time Series #3: Lag Features Feature Engineering for Time Series #4: Rolling Window Feature Feature Engineering for Time Series #5: Expanding Window Feature Feature Engineering for Time Series #6: Domain-Specific Features Validation Technique for Time Series Frequently Asked Questions End Notes Quick Introduction to Time Series Before we look at the feature engineering techniques, let’s brush over some basic time series concepts. We’ll be using them throughout the article so it’s best to be acquainted with them here. So, what makes time series projects different from the traditional machine learning problems? Let’s take a simple example to understand this. If we want to predict today’s stock price for a certain company, it would be helpful to have information about yesterday’s closing price, right? Similarly, predicting the traffic on a website would be a lot easier if we have data about the last few months or years. There’s another thing we need to consider – time series data may also have certain trends or seasonality. 
Take a look at the plot shown below about the number of tickets booked for an airline over the years: “ In a time series, the data is captured at equal intervals and each successive data point in the series depends on its past values. view raw view raw We can clearly see an increasing trend. Such information can be useful for making more accurate predictions. Now, let’s take a dataset with date-time variables and start learning about feature engineering! Setting up the Problem Statement for Time Series Data We’ll be working on a fascinating problem to learn feature engineering techniques for time series. We have the historical data for ‘JetRail’, a form of public rail transport, that uses advanced technology to run rails at a high speed. JetRail’s usage has increased recently and we have to forecast the traffic on JetRail for the next 7 months based on past data. Let’s see how we can help JetRail’s management team solve this problem. You can go through the detailed problem statement and download the dataset from here. Let’s load the dataset in our notebook: loading_data.py hosted with ❤ by GitHub We have two columns here – so it’s clearly a univariate time series. Also, the data type of the date variable is taken as an object, i.e. it is being treated as a categorical variable. Hence, we will need to convert this into a DateTime variable. We can do this using the appropriately titled datetime function in Pandas: changing_dtype.py hosted with ❤ by GitHub Now that we have the data ready, let’s look at the different features we can engineer from this variable. Along with each of these feature engineering techniques, we will discuss different scenarios where that particular technique can be useful. NOTE: I have taken a simple time series problem to demonstrate the different feature engineering techniques in this article. You can use them on a dataset of your choice as long as the date-time column is present. 
Feature Engineering for Time Series #1: Date-Related Features Have you ever worked in a product company? You’ll be intimately familiar with the task of forecasting the sales for a particular product. We can find out the sales pattern for weekdays and weekends based on historical data. Thus, having information about the day, month, year, etc. can be useful for forecasting the values. Let’s get back to our JetRail project. We have to forecast the count of people who will take the JetRail on an hourly basis for the next 7 months. This number could be higher for weekdays and lower for weekends or during the festive seasons. Hence, the day of the week (weekday or weekend) or month will be an important factor. Extracting these features is really easy in Python: 1 import pandas as pd 2 data = pd.read_csv('Train_SU63ISt.csv') 3 data.dtypes 1 import pandas as pd 2 data = pd.read_csv('Train_SU63ISt.csv') 3 data['Datetime'] = pd.to_datetime(data['Datetime'],format='%d-%m-%Y %H:%M') 4 data.dtypes 1 import pandas as pd 2 data = pd.read_csv('Train_SU63ISt.csv') 3 data['Datetime'] = pd.to_datetime(data['Datetime'],format='%d-%m-%Y %H:%M') view raw view raw date_features.py hosted with ❤ by GitHub Feature Engineering for Time Series #2: Time-Based Features We can similarly extract more granular features if we have the time stamp. For instance, we can determine the hour or minute of the day when the data was recorded and compare the trends between the business hours and non-business hours. If we are able to extract the ‘hour’ feature from the time stamp, we can make more insightful conclusions about the data. We could find out if the traffic on JetRail is higher during the morning, afternoon or evening time. Or we could use the value to determine the average hourly traffic throughout the week, i.e. the number of people who used JetRail between 9-10 am, 10-11 am, and so on (throughout the week). 
Extracting time-based features is very similar to what we did above when extracting date-related features. We start by converting the column to DateTime format and use the .dt accessor. Here’s how to do it in Python: time_features.py hosted with ❤ by GitHub Similarly, we can extract a number of features from the date column. Here’s a complete list of features that we can generate: 4 5 data['year']=data['Datetime'].dt.year 6 data['month']=data['Datetime'].dt.month 7 data['day']=data['Datetime'].dt.day 8 9 data['dayofweek_num']=data['Datetime'].dt.dayofweek 10 data['dayofweek_name']=data['Datetime'].dt.weekday_name 11 12 data.head() 1 import pandas as pd 2 data = pd.read_csv('Train_SU63ISt.csv') 3 data['Datetime'] = pd.to_datetime(data['Datetime'],format='%d-%m-%Y %H:%M') 4 5 data['Hour'] = data['Datetime'].dt.hour 6 data['minute'] = data['Datetime'].dt.minute 7 8 data.head() Run the code below to generate the date and hour features for the given data. You can select any of the above functions and run the following code to generate a new feature for the same! @ L a k s h a y A r o r a 1 / Time B a s e d F e a t u r e s A P y t h o n r e p l b y L a k s h a y A r o r a 1 O p e n o n R e p lit S h o w c o d e 0 R u n L A view raw view raw Feature Engineering for Time Series #3: Lag Features Here’s something most aspiring data scientists don’t think about when working on a time series problem – we can also use the target variable for feature engineering! Consider this – you are predicting the stock price for a company. So, the previous day’s stock price is important to make a prediction, right? In other words, the value at time t is greatly affected by the value at time t-1. The past values are known as lags, so t-1 is lag 1, t-2 is lag 2, and so on. lag_feature.py hosted with ❤ by GitHub Here, we were able to generate lag one feature for our series. But why lag one? Why not five or seven? That’s a good question. 
If the series has a weekly trend, which means the value last Monday can be used to predict the value for this Monday, you should create lag features for seven days. Getting the drift? We can create multiple lag features as well! Let’s say we want lag 1 to lag 7 – we can let the model decide which is the most valuable one. So, if we train a linear regression model, it will assign appropriate weights (or coefficients) to the lag features: lag_seven.py hosted with ❤ by GitHub 1 import pandas as pd 2 data = pd.read_csv('Train_SU63ISt.csv') 3 data['Datetime'] = pd.to_datetime(data['Datetime'],format='%d-%m-%Y %H:%M') 4 5 data['lag_1'] = data['Count'].shift(1) 6 data = data[['Datetime', 'lag_1', 'Count']] 7 data.head() ‘ The lag value we choose will depend on the correlation of individual values with its past values. 1 import pandas as pd 2 data = pd.read_csv('Train_SU63ISt.csv') 3 data['Datetime'] = pd.to_datetime(data['Datetime'],format='%d-%m-%Y %H:%M') 4 5 data['lag_1'] = data['Count'].shift(1) 6 data['lag_2'] = data['Count'].shift(2) 7 data['lag_3'] = data['Count'].shift(3) 8 data['lag_4'] = data['Count'].shift(4) 9 data['lag_5'] = data['Count'].shift(5) 10 data['lag_6'] = data['Count'].shift(6) 11 data['lag_7'] = data['Count'].shift(7) 12 13 data = data[['Datetime', 'lag_1', 'lag_2', 'lag_3', 'lag_4', 'lag_5', 'lag_6', 'lag_7', 'Count']] 14 data.head(10) There is more than one way of determining the lag at which the correlation is significant. For instance, we can use the ACF (Autocorrelation Function) and PACF (Partial Autocorrelation Function) plots. 
ACF: The ACF plot is a measure of the correlation between the time series and the lagged version of itself PACF: The PACF plot is a measure of the correlation between the time series with a lagged version of itself but after eliminating the variations already explained by the intervening comparisons For our particular example, here are the ACF and PACF plots: from statsmodels.graphics.tsaplots import plot_acf, plot_pacf plot_acf(data['Count'], lags=10) plot_pacf(data['Count'], lags=10) An important point to note – the number of times you shift, the same number of values will be reduced from the data. You would see some rows with NaNs at the start. That’s because the first observation has no lag. You’ll need to discard these rows from the training data. Feature Engineering for Time Series #4: Rolling Window Feature In the last section, we looked at how we can use the previous values as features. Here’s an awesome gif that explains this idea in a wonderfully intuitive way: ‘ The partial autocorrelation function shows a high correlation with the first lag and lesser correlation with the second and third lag. The autocorrelation function shows a slow decay, which means that the future values have a very high correlation with its past values. ‘ How about calculating some statistical values based on past values? This method is called the rolling window method because the window would be different for every data point. view raw Since this looks like a window that is sliding with every next point, the features generated using this method are called the ‘rolling window’ features. Now the question we need to address – how are we going to perform feature engineering here? Let’s start simple. We will select a window size, take the average of the values in the window, and use it as a feature. Let’s implement it in Python: rolling_mean.py hosted with ❤ by GitHub Similarly, you can consider the sum, min, max value, etc. 
(for the selected window) as a feature and try it out on your own machine. Thus, we can use a weighted average, such that higher weights are given to the most recent observations. Mathematically, weighted average at time t for the past 7 values would be: w_avg = w1*(t-1) + w2*(t-2) + . . . . + w7*(t-7) where, w1>w2>w3> . . . . >w7. 1 import pandas as pd 2 data = pd.read_csv('Train_SU63ISt.csv') 3 data['Datetime'] = pd.to_datetime(data['Datetime'],format='%d-%m-%Y %H:%M') 4 5 data['rolling_mean'] = data['Count'].rolling(window=7).mean() 6 data = data[['Datetime', 'rolling_mean', 'Count']] 7 data.head(10) ‘ Recency in an important factor in a time series. Values closer to the current date would hold more information. view raw Feature Engineering for Time Series #5: Expanding Window Feature This is simply an advanced version of the rolling window technique. In the case of a rolling window, the size of the window is constant while the window slides as we move forward in time. Hence, we consider only the most recent values and ignore the past values. Here’s a gif that explains how our expanding window function works: As you can see, with every step, the size of the window increases by one as it takes into account every new value in the series. This can be implemented easily in Python by using the expanding() function. Let’s code this using the same data: expanding_window.py hosted with ❤ by GitHub Here is a live coding window that generates the expanding window feature for the given data. Feel free to change the starting window size and print the results: ‘ The idea behind the expanding window feature is that it takes all the past values into account. 
1 import pandas as pd 2 data = pd.read_csv('Train_SU63ISt.csv') 3 data['Datetime'] = pd.to_datetime(data['Datetime'],format='%d-%m-%Y %H:%M') 4 5 data['expanding_mean'] = data['Count'].expanding(2).mean() 6 data = data[['Datetime','Count', 'expanding_mean']] 7 data.head(10) @ L a k s h a y A r o r a 1 / E x p a n din g Win d o w F e a t u r e A P y t h o n r e p l b y L a k s h a y A r o r a 1 O p e n o n R e p lit S h o w c o d e 0 R u n 1 3 L A Feature Engineering for Time Series #6: Domain-Specific Features This is the essence of feature engineering! Want to dive into this more? Let’s take an example. Below is the data provided by a retailer for a number of stores and products. Our task is to forecast the future demands for the products. We can come up with various features, like taking a lag or averaging the past values, among other things. But hold on. Let me ask you a question – would it be the right way to build lag features from lag(1) to lag(7) throughout the data? Certainly not! There are different stores and products, and the demand for each store and product would be significantly different. In this case, we can create lag features considering the store-product combination. Moreover, if we have knowledge about the products and the trends in the market, we would be able to generate more accurate (and fewer) features. Not only this, having a good understanding about the domain and data would help us in selecting the lag value and the window size. Additionally, based on your domain knowledge, you would be able to pull external data that adds more value to the model. Here’s what I mean – are the sales affected by the weather on the day? Will the sales increase/decrease on a national holiday?If yes, then you can use external datasets and include the list of holidays as a feature. 
Validation Technique for Time Series All the feature engineering techniques we have discussed can be used to convert a time series problem into a supervised machine learning problem. Once we have that, we can easily go ahead with machine learning algorithms like linear regression and random forest. But there is one important step that you should know before you jump to the model building process – creating a validation setfor time series. For the traditional machine learning problems, we randomly select subsets of data for the validation and test sets. But in these cases, each data point is dependent on its past values. If we randomly shuffle the data, we might be training on future data and predicting the past values! ‘ Having a good understanding of the problem statement, clarity of the end objective and knowledge of the available data is essential to engineer domain-specific features for the model. Let’s create a validation set for our problem. But first, we must check the duration for which we have the data: import pandas as pd data = pd.read_csv('Train_SU63ISt.csv') data['Datetime'] = pd.to_datetime(data['Datetime'],format='%d-%m-%Y %H:%M') data['Datetime'].min(), data['Datetime'].max(), (data['Datetime'].max() -data['Datetime'].min()) (Timestamp('2012-08-25 00:00:00'), Timestamp('2014-09-25 23:00:00'), Timedelta('761 days 23:00:00')) We have data for almost 25 months. Let’s save three months for validation and use the remaining for training: data.index = data.Datetime Train=data.loc['2012-08-25':'2014-06-24'] valid=data.loc['2014-06-25':'2014-09-25'] Train.shape, valid.shape ((16056, 3), (2232, 3)) Great! We have the train and validation sets ready. You can now use these feature engineering techniques and build machine learning models on this data! Frequently Asked Questions Q1. What are the features oftime series? A. The features of a time series are the characteristics and patterns observed within the data over time. Some of the key features include: 1. 
Trend: The long-term movement or direction in the data, indicating overall growth or decline. 2. Seasonality: Regular and predictable patterns that repeat at fixed intervals. 3. Cyclic Patterns: Longer-term oscillations with varying periods, not necessarily repeating at fixed intervals. 4. Noise: Random fluctuations or irregularities in the data that do not follow any specific pattern. 5. Autocorrelation: The correlation of a time series with its own past values at different lags. 6. Level: The baseline or starting point of the time series data. Understanding these features is essential for time series analysis and forecasting. Q2. What is the time series feature extraction method? A. Time series feature extraction methods involve transforming raw time series data into a set of relevant and informative features. Techniques like moving averages, exponential smoothing, Fourier transforms, wavelet transforms, and statistical measures (mean, variance, etc.) are used to extract characteristics such as trend, seasonality, periodicity, and statistical properties. These features are then used for time series analysis, classification, and forecasting tasks. End Notes Time Series is often considered a difficult topic to master. That’s understandable because there are a lot of moving parts when we’re working with the date and time components. But once you have a hang of the basic concepts and are able to perform feature engineering, you’ll be gliding through your projects in no time. In this article, we discussed some simple techniques that you can use to work with time series data. Using these feature engineering techniques, we can convert any time series problem into a supervised learning problem and build regression models. It is important that we carefully build a validation set when working on a time series problem, without destroying the sequential order within the data.  Navigation Click to Take the FREE Probability Crash-Course Search... 
  228 How to Use ROC Curves and Precision-Recall Curves for Classification in Python by Jason Brownlee on October 11, 2023 in Probability It can be more flexible to predict probabilities of an observation belonging to each class in a classification problem rather than predicting classes directly. This flexibility comes from the way that probabilities may be interpreted using different thresholds that allow the operator of the model to trade-off concerns in the errors made by the model, such as the number of false positives compared to the number of false negatives. This is required when using models where the cost of one error outweighs the cost of other types of errors. Two diagnostic tools that help in the interpretation of probabilistic forecast for binary (two-class) classification predictive modeling problems are ROC Curves and Precision-Recall curves. In this tutorial, you will discover ROC Curves, Precision-Recall Curves, and when to use each to interpret the prediction of probabilities for binary classification problems. After completing this tutorial, you will know: ROC Curves summarize the trade-off between the true positive rate and false positive rate for a predictive model using different probability thresholds. Precision-Recall curves summarize the trade-off between the true positive rate and the positive predictive value for a predictive model using different probability thresholds. ROC curves are appropriate when the observations are balanced between each class, whereas precision-recall curves are appropriate for imbalanced datasets. Kick-start your project with my new book Probability for Machine Learning, including step-by-step tutorials and the Python source code files for all examples. Let’s get started. Update Aug/2018: Fixed bug in the representation of the no skill line for the precision-recall plot. Also fixed typo where I referred to ROC as relative rather than receiver (thanks spellcheck). 
Share Tweet Share Update Nov/2018: Fixed description on interpreting size of values on each axis, thanks Karl Humphries. Update Jun/2019: Fixed typo when interpreting imbalanced results. Update Oct/2019: Updated ROC Curve and Precision Recall Curve plots to add labels, use a logistic regression model and actually compute the performance of the no skill classifier. Update Nov/2019: Improved description of no skill classifier for precision-recall curve. Update Oct/2023: Minor update on code to make it more Pythonic How and When to Use ROC Curves and Precision-Recall Curves for Classification in Python Photo by Giuseppe Milo, some rights reserved. Tutorial Overview This tutorial is divided into 6 parts; they are: 1. Predicting Probabilities 2. What Are ROC Curves? 3. ROC Curves and AUC in Python 4. What Are Precision-Recall Curves? 5. Precision-Recall Curves and AUC in Python 6. When to Use ROC vs. Precision-Recall Curves? Predicting Probabilities In a classification problem, we may decide to predict the class values directly. Alternately, it can be more flexible to predict the probabilities for each class instead. The reason for this is to provide the capability to choose and even calibrate the threshold for how to interpret the predicted probabilities. For example, a default might be to use a threshold of 0.5, meaning that a probability in [0.0, 0.49] is a negative outcome (0) and a probability in [0.5, 1.0] is a positive outcome (1). This threshold can be adjusted to tune the behavior of the model for a specific problem. An example would be to reduce more of one or another type of error. When making a prediction for a binary or two-class classification problem, there are two types of errors that we could make. False Positive. Predict an event when there was no event. False Negative. Predict no event when in fact there was an event. By predicting probabilities and calibrating a threshold, a balance of these two concerns can be chosen by the operator of the model. 
For example, in a smog prediction system, we may be far more concerned with having low false negatives than low false positives. A false negative would mean not warning about a smog day when in fact it is a high smog day, leading to health issues in the public that are unable to take precautions. A false positive means the public would take precautionary measures when they didn’t need to. A common way to compare models that predict probabilities for two-class problems is to use a ROC curve. What Are ROC Curves? A useful tool when predicting the probability of a binary outcome is the Receiver Operating Characteristic curve, or ROC curve. It is a plot of the false positive rate (x-axis) versus the true positive rate (y-axis) for a number of different candidate threshold values between 0.0 and 1.0. Put another way, it plots the false alarm rate versus the hit rate. The true positive rate is calculated as the number of true positives divided by the sum of the number of true positives and the number of false negatives. It describes how good the model is at predicting the positive class when the actual outcome is positive. The true positive rate is also referred to as sensitivity. The false positive rate is calculated as the number of false positives divided by the sum of the number of false positives and the number of true negatives. It is also called the false alarm rate as it summarizes how often a positive class is predicted when the actual outcome is negative. 1 True Positive Rate = True Positives / (True Positives + False Negatives) 1 Sensitivity = True Positives / (True Positives + False Negatives) 1 False Positive Rate = False Positives / (False Positives + True Negatives) The false positive rate is also referred to as the inverted specificity where specificity is the total number of true negatives divided by the sum of the number of true negatives and false positives. 
Where: The ROC curve is a useful tool for a few reasons: The curves of different models can be compared directly in general or for different thresholds. The area under the curve (AUC) can be used as a summary of the model skill. The shape of the curve contains a lot of information, including what we might care about most for a problem, the expected false positive rate, and the false negative rate. To make this clear: Smaller values on the x-axis of the plot indicate lower false positives and higher true negatives. Larger values on the y-axis of the plot indicate higher true positives and lower false negatives. If you are confused, remember, when we predict a binary outcome, it is either a correct prediction (true positive) or not (false positive). There is a tension between these options, the same with true negative and false negative. A skilful model will assign a higher probability to a randomly chosen real positive occurrence than a negative occurrence on average. This is what we mean when we say that the model has skill. Generally, skilful models are represented by curves that bow up to the top left of the plot. A no-skill classifier is one that cannot discriminate between the classes and would predict a random class or a constant class in all cases. A model with no skill is represented at the point (0.5, 0.5). A model with no skill at each threshold is represented by a diagonal line from the bottom left of the plot to the top right and has an AUC of 0.5. A model with perfect skill is represented at a point (0,1). A model with perfect skill is represented by a line that travels from the bottom left of the plot to the top left and then across the top to the top right. An operator may plot the ROC curve for the final model and choose a threshold that gives a desirable balance between the false positives and false negatives. Want to Learn Probability for Machine Learning Take my free 7-day email crash course now (with sample code). 
Click to sign-up and also get a free PDF Ebook version of the course. Download Your FREE Mini-Course 1 Specificity = True Negatives / (True Negatives + False Positives) 1 False Positive Rate = 1 - Specificity ROC Curves and AUC in Python We can plot a ROC curve for a model in Python using the roc_curve() scikit-learn function. The function takes both the true outcomes (0,1) from the test set and the predicted probabilities for the 1 class. The function returns the false positive rates for each threshold, true positive rates for each threshold and thresholds. The AUC for the ROC can be calculated using the roc_auc_score() function. Like the roc_curve() function, the AUC function takes both the true outcomes (0,1) from the test set and the predicted probabilities for the 1 class. It returns the AUC score between 0.0 and 1.0 for no skill and perfect skill respectively. A complete example of calculating the ROC curve and ROC AUC for a Logistic Regression model on a small test problem is listed below. 1 2 3 ... # calculate roc curve fpr, tpr, thresholds = roc_curve(y, probs) 1 2 3 4 ... 
# calculate AUC auc = roc_auc_score(y, probs) print('AUC: %.3f' % auc) 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 import matplotlib.pyplot as plt # roc curve and auc from sklearn.datasets import make_classification from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.metrics import roc_curve from sklearn.metrics import roc_auc_score # generate 2 class dataset X, y = make_classification(n_samples=1000, n_classes=2, random_state=1) # split into train/test sets trainX, testX, trainy, testy = train_test_split(X, y, test_size=0.5, random_state=2) # generate a no skill prediction (majority class) ns_probs = [0 for _ in range(len(testy))] # fit a model model = LogisticRegression(solver='lbfgs') model.fit(trainX, trainy) # predict probabilities lr_probs = model.predict_proba(testX) # keep probabilities for the positive outcome only lr_probs = lr_probs[:, 1] # calculate scores ns_auc = roc_auc_score(testy, ns_probs) lr_auc = roc_auc_score(testy, lr_probs) # summarize scores print('No Skill: ROC AUC=%.3f' % (ns_auc)) print('Logistic: ROC AUC=%.3f' % (lr_auc)) # calculate roc curves ns_fpr, ns_tpr, _ = roc_curve(testy, ns_probs) lr_fpr, lr_tpr, _ = roc_curve(testy, lr_probs) # plot the roc curve for the model plt.plot(ns_fpr, ns_tpr, linestyle='--', label='No Skill') plt.plot(lr_fpr, lr_tpr, marker='.', label='Logistic') # axis labels Running the example prints the ROC AUC for the logistic regression model and the no skill classifier that only predicts 0 for all examples. A plot of the ROC curve for the model is also created showing that the model has skill. Note: Your results may vary given the stochastic nature of the algorithm or evaluation procedure, or differences in numerical precision. Consider running the example a few times and compare the average outcome. 
ROC Curve Plot for a No Skill Classifier and a Logistic Regression Model What Are Precision-Recall Curves? There are many ways to evaluate the skill of a prediction model. An approach in the related field of information retrieval (finding documents based on queries) measures precision and recall. 35 36 37 38 39 40 plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') # show the legend plt.legend() # show the plot plt.show() 1 2 No Skill: ROC AUC=0.500 Logistic: ROC AUC=0.903 These measures are also useful in applied machine learning for evaluating binary classification models. Precision is a ratio of the number of true positives divided by the sum of the true positives and false positives. It describes how good a model is at predicting the positive class. Precision is referred to as the positive predictive value. or Recall is calculated as the ratio of the number of true positives divided by the sum of the true positives and the false negatives. Recall is the same as sensitivity. or Reviewing both precision and recall is useful in cases where there is an imbalance in the observations between the two classes. Specifically, there are many examples of no event (class 0) and only a few examples of an event (class 1). The reason for this is that typically the large number of class 0 examples means we are less interested in the skill of the model at predicting class 0 correctly, e.g. high true negatives. Key to the calculation of precision and recall is that the calculations do not make use of the true negatives. It is only concerned with the correct prediction of the minority class, class 1. A precision-recall curve is a plot of the precision (y-axis) and the recall (x-axis) for different thresholds, much like the ROC curve. A no-skill classifier is one that cannot discriminate between the classes and would predict a random class or a constant class in all cases. The no-skill line changes based on the distribution of the positive to negative classes. 
It is a horizontal line with the value of the ratio of positive cases in the dataset. For a balanced dataset, this is 0.5. — The Precision-Recall Plot Is More Informative than the ROC Plot When Evaluating Binary Classifiers on Imbalanced Datasets, 2015. A model with perfect skill is depicted as a point at (1,1). A skilful model is represented by a curve that bows towards (1,1) above the flat line of no skill. While the baseline is fixed with ROC, the baseline of [precision-recall curve] is determined by the ratio of positives (P) and negatives (N) as y = P / (P + N). For instance, we have y = 0.5 for a balanced class distribution …  1 Positive Predictive Power = True Positives / (True Positives + False Positives) 1 Precision = True Positives / (True Positives + False Positives) 1 Recall = True Positives / (True Positives + False Negatives) 1 Sensitivity = True Positives / (True Positives + False Negatives) 1 Recall == Sensitivity There are also composite scores that attempt to summarize the precision and recall; two examples include: F-Measure or F1 score: that calculates the harmonic mean of the precision and recall (harmonic mean because the precision and recall are rates). Area Under Curve: like the AUC, summarizes the integral or an approximation of the area under the precision-recall curve. In terms of model selection, F-Measure summarizes model skill for a specific probability threshold (e.g. 0.5), whereas the area under curve summarize the skill of a model across thresholds, like ROC AUC. This makes precision-recall and a plot of precision vs. recall and summary measures useful tools for binary classification problems that have an imbalance in the observations for each class. Precision-Recall Curves in Python Precision and recall can be calculated in scikit-learn. 
The precision and recall can be calculated for thresholds using the precision_recall_curve() function that takes the true output values and the probabilities for the positive class as input and returns the precision, recall and threshold values. The F-Measure can be calculated by calling the f1_score() function that takes the true class values and the predicted class values as arguments. The area under the precision-recall curve can be approximated by calling the auc() function and passing it the recall (x) and precision (y) values calculated for each threshold. When plotting precision and recall for each threshold as a curve, it is important that recall is provided as the x-axis and precision is provided as the y-axis. The complete example of calculating precision-recall curves for a Logistic Regression model is listed below. 1 2 3 ... # calculate precision-recall curve precision, recall, thresholds = precision_recall_curve(testy, probs) 1 2 3 ... # calculate F1 score f1 = f1_score(testy, yhat) 1 2 3 ... # calculate precision-recall AUC auc = auc(recall, precision) 1 2 3 4 5 6 7 8 9 import matplotlib.pyplot as plt # precision-recall curve and f1 from sklearn.datasets import make_classification from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.metrics import precision_recall_curve from sklearn.metrics import f1_score from sklearn.metrics import auc Running the example first prints the F1, area under curve (AUC) for the logistic regression model. Note: Your results may vary given the stochastic nature of the algorithm or evaluation procedure, or differences in numerical precision. Consider running the example a few times and compare the average outcome. The precision-recall curve plot is then created showing the precision/recall for each threshold for a logistic regression model (orange) compared to a no skill model (blue). 
10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 # generate 2 class dataset X, y = make_classification(n_samples=1000, n_classes=2, random_state=1) # split into train/test sets trainX, testX, trainy, testy = train_test_split(X, y, test_size=0.5, random_state=2) # fit a model model = LogisticRegression(solver='lbfgs') model.fit(trainX, trainy) # predict probabilities lr_probs = model.predict_proba(testX) # keep probabilities for the positive outcome only lr_probs = lr_probs[:, 1] # predict class values yhat = model.predict(testX) lr_precision, lr_recall, _ = precision_recall_curve(testy, lr_probs) lr_f1, lr_auc = f1_score(testy, yhat), auc(lr_recall, lr_precision) # summarize scores print('Logistic: f1=%.3f auc=%.3f' % (lr_f1, lr_auc)) # plot the precision-recall curves no_skill = len(testy[testy==1]) / len(testy) plt.plot([0, 1], [no_skill, no_skill], linestyle='--', label='No Skill') plt.plot(lr_recall, lr_precision, marker='.', label='Logistic') # axis labels plt.xlabel('Recall') plt.ylabel('Precision') # show the legend plt.legend() # show the plot plt.show() 1 Logistic: f1=0.841 auc=0.898 Precision-Recall Plot for a No Skill Classifier and a Logistic Regression Model When to Use ROC vs. Precision-Recall Curves? Generally, the use of ROC curves and precision-recall curves are as follows: ROC curves should be used when there are roughly equal numbers of observations for each class. Precision-Recall curves should be used when there is a moderate to large class imbalance. The reason for this recommendation is that ROC curves present an optimistic picture of the model on datasets with a class imbalance. — The Relationship Between Precision-Recall and ROC Curves, 2006. Some go further and suggest that using a ROC curve with an imbalanced dataset might be deceptive and lead to incorrect interpretations of the model skill. 
However, ROC curves can present an overly optimistic view of an algorithm’s performance if there is a large skew in the class distribution. […] Precision-Recall (PR) curves, often used in Information Retrieval , have been cited as an alternative to ROC curves for tasks with a large skew in the class distribution.  — The Precision-Recall Plot Is More Informative than the ROC Plot When Evaluating Binary Classifiers on Imbalanced Datasets, 2015. The main reason for this optimistic picture is because of the use of true negatives in the False Positive Rate in the ROC Curve and the careful avoidance of this rate in the Precision-Recall curve. — ROC Graphs: Notes and Practical Considerations for Data Mining Researchers, 2003. We can make this concrete with a short example. Below is the same ROC Curve example with a modified problem where there is a ratio of about 100:1 ratio of class=0 to class=1 observations (specifically Class0=985, Class1=15). […] the visual interpretability of ROC plots in the context of imbalanced datasets can be deceptive with respect to conclusions about the reliability of classification performance, owing to an intuitive but wrong interpretation of specificity. [Precision-recall curve] plots, on the other hand, can provide the viewer with an accurate prediction of future classification performance due to the fact that they evaluate the fraction of true positives among positive predictions  If the proportion of positive to negative instances changes in a test set, the ROC curves will not change. Metrics such as accuracy, precision, lift and F scores use values from both columns of the confusion matrix. As a class distribution changes these measures will change as well, even if the fundamental classifier performance does not. ROC graphs are based upon TP rate and FP rate, in which each dimension is a strict columnar ratio, so do not depend on class distributions. 
 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 import matplotlib.pyplot as plt # roc curve and auc on an imbalanced dataset from sklearn.datasets import make_classification from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.metrics import roc_curve from sklearn.metrics import roc_auc_score # generate 2 class dataset X, y = make_classification(n_samples=1000, n_classes=2, weights=[0.99,0.01], random_state=1) # split into train/test sets trainX, testX, trainy, testy = train_test_split(X, y, test_size=0.5, random_state=2) # generate a no skill prediction (majority class) ns_probs = [0 for _ in range(len(testy))] # fit a model model = LogisticRegression(solver='lbfgs') model.fit(trainX, trainy) # predict probabilities lr_probs = model.predict_proba(testX) # keep probabilities for the positive outcome only lr_probs = lr_probs[:, 1] # calculate scores ns_auc = roc_auc_score(testy, ns_probs) lr_auc = roc_auc_score(testy, lr_probs) # summarize scores print('No Skill: ROC AUC=%.3f' % (ns_auc)) print('Logistic: ROC AUC=%.3f' % (lr_auc)) # calculate roc curves Running the example suggests that the model has skill. Note: Your results may vary given the stochastic nature of the algorithm or evaluation procedure, or differences in numerical precision. Consider running the example a few times and compare the average outcome. Indeed, it has skill, but all of that skill is measured as making correct true negative predictions and there are a lot of negative predictions to make. If you review the predictions, you will see that the model predicts the majority class (class 0) in all cases on the test set. The score is very misleading. A plot of the ROC Curve confirms the AUC interpretation of a skilful model for most probability thresholds. 
29 30 31 32 33 34 35 36 37 38 39 40 ns_fpr, ns_tpr, _ = roc_curve(testy, ns_probs) lr_fpr, lr_tpr, _ = roc_curve(testy, lr_probs) # plot the roc curve for the model plt.plot(ns_fpr, ns_tpr, linestyle='--', label='No Skill') plt.plot(lr_fpr, lr_tpr, marker='.', label='Logistic') # axis labels plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') # show the legend plt.legend() # show the plot plt.show() 1 2 No Skill: ROC AUC=0.500 Logistic: ROC AUC=0.716 ROC Curve Plot for a No Skill Classifier and a Logistic Regression Model for an Imbalanced Dataset We can also repeat the test of the same model on the same dataset and calculate a precision-recall curve and statistics instead. The complete example is listed below. 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 import matplotlib.pyplot as plt # precision-recall curve and f1 for an imbalanced dataset from sklearn.datasets import make_classification from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.metrics import precision_recall_curve from sklearn.metrics import f1_score from sklearn.metrics import auc # generate 2 class dataset X, y = make_classification(n_samples=1000, n_classes=2, weights=[0.99,0.01], random_state=1) # split into train/test sets trainX, testX, trainy, testy = train_test_split(X, y, test_size=0.5, random_state=2) # fit a model model = LogisticRegression(solver='lbfgs') model.fit(trainX, trainy) # predict probabilities lr_probs = model.predict_proba(testX) # keep probabilities for the positive outcome only lr_probs = lr_probs[:, 1] # predict class values yhat = model.predict(testX) # calculate precision and recall for each threshold lr_precision, lr_recall, _ = precision_recall_curve(testy, lr_probs) # calculate scores Running the example first prints the F1 and AUC scores. 
Note: Your results may vary given the stochastic nature of the algorithm or evaluation procedure, or differences in numerical precision. Consider running the example a few times and compare the average outcome. We can see that the model is penalized for predicting the majority class in all cases. The scores show that the model that looked good according to the ROC Curve is in fact barely skillful when considered using precision and recall that focus on the positive class. The plot of the precision-recall curve highlights that the model is just barely above the no skill line for most thresholds. This is possible because the model predicts probabilities and is uncertain about some cases. These get exposed through the different thresholds evaluated in the construction of the curve, flipping some class 0 to class 1, offering some precision but very low recall. 26 27 28 29 30 31 32 33 34 35 36 37 38 39 lr_f1, lr_auc = f1_score(testy, yhat), auc(lr_recall, lr_precision) # summarize scores print('Logistic: f1=%.3f auc=%.3f' % (lr_f1, lr_auc)) # plot the precision-recall curves no_skill = len(testy[testy==1]) / len(testy) plt.plot([0, 1], [no_skill, no_skill], linestyle='--', label='No Skill') plt.plot(lr_recall, lr_precision, marker='.', label='Logistic') # axis labels plt.xlabel('Recall') plt.ylabel('Precision') # show the legend plt.legend() # show the plot plt.show() 1 Logistic: f1=0.000 auc=0.054 Precision-Recall Plot for a No Skill Classifier and a Logistic Regression Model for an Imbalanced Dataset Further Reading This section provides more resources on the topic if you are looking to go deeper. Papers A critical investigation of recall and precision as measures of retrieval system performance, 1989. The Relationship Between Precision-Recall and ROC Curves, 2006. The Precision-Recall Plot Is More Informative than the ROC Plot When Evaluating Binary Classifiers on Imbalanced Datasets, 2015. 
ROC Graphs: Notes and Practical Considerations for Data Mining Researchers, 2003. API sklearn.metrics.roc_curve API sklearn.metrics.roc_auc_score API sklearn.metrics.precision_recall_curve API sklearn.metrics.auc API sklearn.metrics.average_precision_score API Precision-Recall, scikit-learn Precision, recall and F-measures, scikit-learn Articles Receiver operating characteristic on Wikipedia Sensitivity and specificity on Wikipedia Precision and recall on Wikipedia Information retrieval on Wikipedia F1 score on Wikipedia ROC and precision-recall with imbalanced datasets, blog. Summary In this tutorial, you discovered ROC Curves, Precision-Recall Curves, and when to use each to interpret the prediction of probabilities for binary classification problems. Specifically, you learned: ROC Curves summarize the trade-off between the true positive rate and false positive rate for a predictive model using different probability thresholds. Precision-Recall curves summarize the trade-off between the true positive rate and the positive predictive value for a predictive model using different probability thresholds. ROC curves are appropriate when the observations are balanced between each class, whereas precision-recall curves are appropriate for imbalanced datasets. Do you have any questions? Ask your questions in the comments below and I will do my best to answer. Get a Handle on Probability for Machine Learning! Develop Your Understanding of Probability ...with just a few lines of python code Discover how in my new Ebook: Probability for Machine Learning It provides self-study tutorials and end-to-end projects on: Bayes Theorem, Bayesian Optimization, Distributions, Maximum Likelihood, Cross-Entropy, Calibrating Models and much more... Finally Harness Uncertainty in Your Projects Skip the Academics. Just Results. SEE WHAT'S INSIDE  Navigation Click to Take the FREE Imbalanced Classification Crash-Course Search... 
  200 A Gentle Introduction to Threshold-Moving for Imbalanced Classification by Jason Brownlee on January 5, 2021 in Imbalanced Classification Classification predictive modeling typically involves predicting a class label. Nevertheless, many machine learning algorithms are capable of predicting a probability or scoring of class membership, and this must be interpreted before it can be mapped to a crisp class label. This is achieved by using a threshold, such as 0.5, where all values equal or greater than the threshold are mapped to one class and all other values are mapped to another class. For those classification problems that have a severe class imbalance, the default threshold can result in poor performance. As such, a simple and straightforward approach to improving the performance of a classifier that predicts probabilities on an imbalanced classification problem is to tune the threshold used to map probabilities to class labels. In some cases, such as when using ROC Curves and Precision-Recall Curves, the best or optimal threshold for the classifier can be calculated directly. In other cases, it is possible to use a grid search to tune the threshold and locate the optimal value. In this tutorial, you will discover how to tune the optimal threshold when converting probabilities to crisp class labels for imbalanced classification. After completing this tutorial, you will know: The default threshold for interpreting probabilities to class labels is 0.5, and tuning this hyperparameter is called threshold moving. How to calculate the optimal threshold for the ROC Curve and Precision-Recall Curve directly. How to manually search threshold values for a chosen model and model evaluation metric. Kick-start your project with my new book Imbalanced Classification with Python, including step-by-step tutorials and the Python source code files for all examples. Let’s get started. Tweet Tweet Share Share × Update Feb/2020: Fixed typo in Specificity equation. 
Update Jan/2021: Updated links for API documentation. A Gentle Introduction to Threshold-Moving for Imbalanced Classification Photo by Bruna cs, some rights reserved. Tutorial Overview This tutorial is divided into five parts; they are: 1. Converting Probabilities to Class Labels 2. Threshold-Moving for Imbalanced Classification 3. Optimal Threshold for ROC Curve 4. Optimal Threshold for Precision-Recall Curve 5. Optimal Threshold Tuning Converting Probabilities to Class Labels × Many machine learning algorithms are capable of predicting a probability or a scoring of class membership. This is useful generally as it provides a measure of the certainty or uncertainty of a prediction. It also provides additional granularity over just predicting the class label that can be interpreted. Some classification tasks require a crisp class label prediction. This means that even though a probability or scoring of class membership is predicted, it must be converted into a crisp class label. The decision for converting a predicted probability or scoring into a class label is governed by a parameter referred to as the “decision threshold,” “discrimination threshold,” or simply the “threshold.” The default value for the threshold is 0.5 for normalized predicted probabilities or scores in the range between 0 or 1. For example, on a binary classification problem with class labels 0 and 1, normalized predicted probabilities and a threshold of 0.5, then values less than the threshold of 0.5 are assigned to class 0 and values greater than or equal to 0.5 are assigned to class 1. Prediction < 0.5 = Class 0 Prediction >= 0.5 = Class 1 The problem is that the default threshold may not represent an optimal interpretation of the predicted probabilities. This might be the case for a number of reasons, such as: The predicted probabilities are not calibrated, e.g. those predicted by an SVM or decision tree. 
The metric used to train the model is different from the metric used to evaluate a final model. The class distribution is severely skewed. The cost of one type of misclassification is more important than another type of misclassification. Worse still, some or all of these reasons may occur at the same time, such as the use of a neural network model with uncalibrated predicted probabilities on an imbalanced classification problem. As such, there is often the need to change the default decision threshold when interpreting the predictions of a model. — Page 53, Learning from Imbalanced Data Sets, 2018. Want to Get Started With Imbalance Classification? Take my free 7-day email crash course now (with sample code). … almost all classifiers generate positive or negative predictions by applying a threshold to a score. The choice of this threshold will have an impact in the trade-offs of positive and negative errors.  × Click to sign-up and also get a free PDF Ebook version of the course. Download Your FREE Mini-Course Threshold-Moving for Imbalanced Classification There are many techniques that may be used to address an imbalanced classification problem, such as resampling the training dataset and developing customized version of machine learning algorithms. Nevertheless, perhaps the simplest approach to handle a severe class imbalance is to change the decision threshold. Although simple and very effective, this technique is often overlooked by practitioners and research academics alike as was noted by Foster Provost in his 2000 article titled “Machine Learning from Imbalanced Data Sets.” — Machine Learning from Imbalanced Data Sets 101, 2000. There are many reasons to choose an alternative to the default decision threshold. For example, you may use ROC curves to analyze the predicted probabilities of a model and ROC AUC scores to compare and select a model, although you require crisp class labels from your model. 
How do you choose the threshold on the ROC Curve that results in the best balance between the true positive rate and the false positive rate? Alternately, you may use precision-recall curves to analyze the predicted probabilities of a model, precision-recall AUC to compare and select models, and require crisp class labels as predictions. How do you choose the threshold on the Precision-Recall Curve that results in the best balance between precision and recall? You may use a probability-based metric to train, evaluate, and compare models like log loss (crossentropy) but require crisp class labels to be predicted. How do you choose the optimal threshold from predicted probabilities more generally? Finally, you may have different costs associated with false positive and false negative misclassification, a so-called cost matrix, but wish to use and evaluate cost-insensitive models and later evaluate their predictions use a cost-sensitive measure. How do you choose a threshold that finds the best trade-off for predictions using the cost matrix? The bottom line is that when studying problems with imbalanced data, using the classifiers produced by standard machine learning algorithms without adjusting the output threshold may well be a critical mistake.  × — Page 67, Learning from Imbalanced Data Sets, 2018. The answer to these questions is to search a range of threshold values in order to find the best threshold. In some cases, the optimal threshold can be calculated directly. Tuning or shifting the decision threshold in order to accommodate the broader requirements of the classification problem is generally referred to as “threshold-moving,” “threshold-tuning,” or simply “thresholding.” — Pages 72, Imbalanced Learning: Foundations, Algorithms, and Applications, 2013. The process involves first fitting the model on a training dataset and making predictions on a test dataset. 
The predictions are in the form of normalized probabilities or scores that are transformed into normalized probabilities. Different threshold values are then tried and the resulting crisp labels are evaluated using a chosen evaluation metric. The threshold that achieves the best evaluation metric is then adopted for the model when making predictions on new data in the future. We can summarize this procedure below. 1. Fit Model on the Training Dataset. 2. Predict Probabilities on the Test Dataset. 3. For each threshold in Thresholds: 3a. Convert probabilities to Class Labels using the threshold. 3b. Evaluate Class Labels. 3c. If Score is Better than Best Score. 3ci. Adopt Threshold. 4. Use Adopted Threshold When Making Class Predictions on New Data. Although simple, there are a few different approaches to implementing threshold-moving depending on your circumstance. We will take a look at some of the most common examples in the following sections. Popular way of training a cost-sensitive classifier without a known cost matrix is to put emphasis on modifying the classification outputs when predictions are being made on new data. This is usually done by setting a threshold on the positive class, below which the negative one is being predicted. The value of this threshold is optimized using a validation set and thus the cost matrix can be learned from training data.  It has been stated that trying other methods, such as sampling, without trying by simply setting the threshold may be misleading. The threshold-moving method uses the original training set to train [a model] and then moves the decision threshold such that the minority class examples are easier to be predicted correctly.  × Optimal Threshold for ROC Curve A ROC curve is a diagnostic plot that evaluates a set of probability predictions made by a model on a test dataset. 
A set of different thresholds are used to interpret the true positive rate and the false positive rate of the predictions on the positive (minority) class, and the scores are plotted in a line of increasing thresholds to create a curve. The false-positive rate is plotted on the x-axis and the true positive rate is plotted on the y-axis and the plot is referred to as the Receiver Operating Characteristic curve, or ROC curve. A diagonal line on the plot from the bottom-left to top-right indicates the “curve” for a no-skill classifier (predicts the majority class in all cases), and a point in the top left of the plot indicates a model with perfect skill. The curve is useful to understand the trade-off in the true-positive rate and false-positive rate for different thresholds. The area under the ROC Curve, so-called ROC AUC, provides a single number to summarize the performance of a model in terms of its ROC Curve with a value between 0.5 (no-skill) and 1.0 (perfect skill). The ROC Curve is a useful diagnostic tool for understanding the trade-off for different thresholds and the ROC AUC provides a useful number for comparing models based on their general capabilities. If crisp class labels are required from a model under such an analysis, then an optimal threshold is required. This would be a threshold on the curve that is closest to the top-left of the plot. Thankfully, there are principled ways of locating this point. First, let’s fit a model and calculate a ROC Curve. We can use the make_classification() function to create a synthetic binary classification problem with 10,000 examples (rows), 99 percent of which belong to the majority class and 1 percent belong to the minority class. We can then split the dataset using the train_test_split() function and use half for the training set and half for the test set. 
We can then fit a LogisticRegression model and use it to make probability predictions on the test set and keep only the probability predictions for the minority class. 1 2 3 4 ... # generate dataset X, y = make_classification(n_samples=10000, n_features=2, n_redundant=0, n_clusters_per_class=1, weights=[0.99], flip_y=0, random_state=4) 1 2 3 ... # split into train/test sets trainX, testX, trainy, testy = train_test_split(X, y, test_size=0.5, random_state=2, stratify 1 2 3 4 ... # fit a model model = LogisticRegression(solver='lbfgs') model.fit(trainX, trainy) × We can then use the roc_auc_score() function to calculate the true-positive rate and false-positive rate for the predictions using a set of thresholds that can then be used to create a ROC Curve plot. We can tie this all together, defining the dataset, fitting the model, and creating the ROC Curve plot. The complete example is listed below. Running the example fits a logistic regression model on the training dataset then evaluates it using a range of thresholds on the test set, creating the ROC Curve We can see that there are a number of points or thresholds close to the top-left of the plot. Which is the threshold that is optimal? 5 6 7 8 # predict probabilities lr_probs = model.predict_proba(testX) # keep probabilities for the positive outcome only lr_probs = lr_probs[:, 1] 1 2 3 ... # calculate scores lr_auc = roc_auc_score(testy, lr_probs)
642626514ca7e3f21d4bf4fce71edac1
{ "intermediate": 0.3206579089164734, "beginner": 0.3420698344707489, "expert": 0.3372722268104553 }
37,233
Help me develop an android app to visualize mathematical functions and plot them
ca33a7b7b58abd2146256953b1fc04fa
{ "intermediate": 0.4531112313270569, "beginner": 0.21128162741661072, "expert": 0.3356071412563324 }
37,234
class ImageViewer: def __init__(self, root): self.SUPPORTED_EXTENSIONS = (".png", ".jpg", ".jpeg", ".gif", ".bmp", ".JPG", ".JPEG", ".BMP", ".GIF", ".webp", ".WEBP") self.button_style = {"font": ("consolas", 10), "fg": "white", "bg": "#3c3c3c", "relief": "flat"} #self.image_loading_queue = Queue(maxsize=5) self.switch_timestamps = [] self.root = root self.root.geometry("800x600") # Initial window size self.root.title("Jestur Viewer") self.root.configure(bg="#808080") # Set window background color self.root.attributes("-topmost", True) # Make the window stay on top self.image_folder = "" self.image_files = [] self.current_image_index = 0 self.main_frame = tk.Frame(self.root, bg="#808080") self.main_frame.pack() self.select_folder_button = tk.Button(self.main_frame, text="Select Folder", command=self.select_folder) self.select_folder_button.configure(**self.button_style) self.select_folder_button.pack(side=tk.LEFT, padx=5, pady=10) self.set_timer_button = tk.Button(self.main_frame, text="Timer", command=self.set_timer_interval) self.set_timer_button.configure(**self.button_style) self.set_timer_button.pack(side=tk.LEFT, padx=5, pady=10) self.start_button = tk.Button(self.main_frame, text="Start", command=self.start_pause_slideshow) self.start_button.configure(font=self.button_style["font"], bg="#909090", fg="#707070", relief=tk.FLAT) self.start_button.pack(side=tk.LEFT, padx=5, pady=10) self.mirror_button = tk.Button(self.main_frame, text=">|<", command=self.toggle_mirror_image) self.mirror_button.configure(**self.button_style) self.mirror_button.pack(side=tk.LEFT, padx=5, pady=10) self.greyscale_button = tk.Button(self.main_frame, text="B/W", command=self.toggle_greyscale) self.greyscale_button.configure(**self.button_style) self.greyscale_button.pack(side=tk.LEFT, padx=5, pady=10) self.random_next_image_var = tk.BooleanVar(value=False) # Checkbox variable self.random_next_image_checkbox = tk.Checkbutton(self.main_frame, text="Random", 
variable=self.random_next_image_var, command=self.toggle_colors) self.random_next_image_checkbox.configure(**self.button_style, bd=2) self.random_next_image_checkbox.pack(side=tk.LEFT, padx=5, pady=10) #image label self.canvas = tk.Canvas(self.root) self.is_mirrored = False self.is_greyscale = False self.is_paused = False self.timer_label = tk.Label(self.root, text="5", **self.button_style, anchor="ne") #fg="white", bg="black", font=("arial", 12), bd=1, relief=tk.RIDGE) self.timer_label.configure(font=("consolas",17)) self.timer_label.place(relx=1, anchor="ne", x=-0, y=0) self.timer_interval = 5000 # Default time interval in milliseconds (5 seconds) self.set_timer_interval = 5000 # Store the set timer interval self.timer = None # Timer ID self.window_size = (self.root.winfo_width(), self.root.winfo_height()) self.resize_timer = None self.root.bind("<Right>", self.next_image) self.root.bind("<Left>", self.previous_image) self.root.bind("<space>", self.start_pause_slideshow) self.root.bind("<Configure>", self.update_image_size) def toggle_colors(self): if self.random_next_image_var.get(): self.random_next_image_checkbox.configure(fg="#2c2c2c", bg="#d8d8d8") else: self.random_next_image_checkbox.configure(**self.button_style) def select_folder(self): self.image_folder = filedialog.askdirectory() if self.image_folder: image_files = os.listdir(self.image_folder) # Get all files in the selected folder self.image_files = [file for file in image_files if file.endswith(self.SUPPORTED_EXTENSIONS)] # Filter image files if len(self.image_files) > 0: self.current_image_index = 0 self.canvas.pack(fill=tk.BOTH, expand=True) self.select_folder_button.pack(side=tk.LEFT, padx=5, pady=5) self.root.title("Jesturing in " + self.image_folder) self.canvas.config(bg="#808080", highlightthickness=0) self.display_image() self.start_button.config(**self.button_style) else: messagebox.showinfo("No Image Files", "The selected folder does not contain any image files.") self.image_folder = "" 
self.root.title("Jestur") def set_timer_interval(self): self.root.attributes("-topmost", False) interval = simpledialog.askinteger("Set Timer Interval", "How many seconds?") if interval: if interval < 1: # Check if interval is less than 1 second interval = 1 self.timer_interval = interval * 1000 # Convert to milliseconds self.set_timer_interval = self.timer_interval # Store the set timer interval self.root.lift() # Bring the main window to the top self.root.focus_force() # Give focus to the main window self.root.attributes("-topmost", True) def start_pause_slideshow(self, event=None): if self.image_folder == "": messagebox.showinfo("You haven't picked a folder", "Please select an image folder first.") else: if self.timer is None: # Check if timer is not running self.display_image() self.start_button.config(text="Pause") self.is_paused = False # Set paused flag to False self.start_timer() else: if self.is_paused: # Check if the slideshow is paused self.start_button.config(text="Pause") self.is_paused = False # Set paused flag to False self.start_timer() else: self.pause_timer() self.start_button.config(text="Start") self.is_paused = True # Set paused flag to True def pause_timer(self): if self.timer is not None: self.root.after_cancel(self.timer) self.timer = None def start_timer(self): if self.image_folder != "" and not self.is_paused: self.update_timer() self.timer = self.root.after(1000, self.start_timer) def update_timer(self): seconds_left = int(self.timer_interval / 1000) self.timer_label.config(text=f"{seconds_left}") self.timer_interval -= 1000 if self.timer_interval < 0: self.timer_interval = self.set_timer_interval # Use the stored set timer interval self.next_image() def toggle_mirror_image(self): self.is_mirrored = not self.is_mirrored self.display_image() def toggle_greyscale(self): self.is_greyscale = not self.is_greyscale self.display_image() def load_image(self, image_path): image = Image.open(image_path) # Check if the image has EXIF data if "exif" 
in image.info: exif_data = piexif.load(image.info["exif"]) if piexif.ImageIFD.Orientation in exif_data["0th"]: orientation = exif_data["0th"][piexif.ImageIFD.Orientation] if orientation == 3: image = image.rotate(180, expand=True) elif orientation == 6: image = image.rotate(-90, expand=True) elif orientation == 8: image = image.rotate(90, expand=True) if self.is_greyscale: image = image.convert("L") if self.is_mirrored: image = image.transpose(Image.FLIP_LEFT_RIGHT) aspect_ratio = image.width / image.height canvas_width = self.canvas.winfo_width() canvas_height = self.canvas.winfo_height() max_width = min(canvas_width, int(aspect_ratio * canvas_height)) max_height = min(canvas_height, int(canvas_width / aspect_ratio)) scale_factor = min(max_width / image.width, max_height / image.height) new_width = int(image.width * scale_factor) new_height = int(image.height * scale_factor) if new_width > 0 and new_height > 0: resized_image = image.resize((new_width, new_height), Image.BICUBIC) self.photo = ImageTk.PhotoImage(resized_image) self.canvas.delete("all") self.canvas.create_image(canvas_width // 2, canvas_height // 2, image=self.photo) def display_image(self): if self.image_folder != "" and len(self.image_files) > 0: if self.update_switch_timestamps(): # Show the text of the image name instead of loading the actual image image_name = self.image_files[self.current_image_index] self.canvas.delete("all") # Clear the canvas self.canvas.create_text( self.canvas.winfo_width() // 2, self.canvas.winfo_height() // 2, text=image_name, fill="white" ) else: # Process the queue and load the image normally if not quick-switching image_path = os.path.join(self.image_folder, self.image_files[self.current_image_index]) threading.Thread(target=self.load_image, args=(image_path,)).start() def update_image_size(self, event=None): current_width = self.root.winfo_width() current_height = self.root.winfo_height() if (current_width, current_height) != self.window_size: self.window_size = 
(current_width, current_height) self.canvas.config(width=current_width, height=current_height) self.buffer_forsize() def buffer_forsize(self, event=None): if self.resize_timer: self.root.after_cancel(self.resize_timer) self.resize_timer = self.root.after(500, self.display_image) def update_switch_timestamps(self): current_time = time.time() self.switch_timestamps.append(current_time) self.switch_timestamps = [t for t in self.switch_timestamps if current_time - t <= 2] if len(self.switch_timestamps) > 6: return True # Too many updates in a short time period return False def next_image(self, event=None): if self.image_folder != "": if self.random_next_image_var.get(): # Check if random next image is selected self.current_image_index = random.randint(0, len(self.image_files) - 1) # Randomly select the next image index else: self.current_image_index = (self.current_image_index + 1) % len(self.image_files) # Linearly select the next image index self.display_image() self.timer_interval = self.set_timer_interval # Use the stored set timer interval def previous_image(self, event=None): if self.image_folder != "": if self.random_next_image_var.get(): # Check if random next image is selected self.current_image_index = random.randint(0, len(self.image_files) - 1) # Randomly select the next image index else: self.current_image_index = (self.current_image_index - 1) % len(self.image_files) self.display_image() self.timer_interval = self.set_timer_interval # Use the stored set timer interval can you make it so at the end of showing the text image name it waits for a 500ms of no keypresses then loads the image like it's supposed to. make sure it only waits not when loading the image but only when the image name text part is triggered
e7401e981e3fdfd2e3fa8b5da6e52ae2
{ "intermediate": 0.2561878561973572, "beginner": 0.552480936050415, "expert": 0.19133126735687256 }
37,235
I have a list of emails and passwords like this: <PRESIDIO_ANONYMIZED_EMAIL_ADDRESS>:51128891A@ <PRESIDIO_ANONYMIZED_EMAIL_ADDRESS>:Rebane12345 <PRESIDIO_ANONYMIZED_EMAIL_ADDRESS>:198147gao! <PRESIDIO_ANONYMIZED_EMAIL_ADDRESS>:mayalona* <PRESIDIO_ANONYMIZED_EMAIL_ADDRESS>:shilianzhe1* and i wanna extract all the emails ending in @hot.ee
49d6699bd93cd3d13727d27a6b1cfa12
{ "intermediate": 0.3383646011352539, "beginner": 0.28422802686691284, "expert": 0.37740740180015564 }
37,236
I will give you a big amout od content, which is from a pdf files for my study material. I want you to make a list of all the topics of it have.
0574f3f783a2d3117db0a52905290150
{ "intermediate": 0.38466981053352356, "beginner": 0.31936895847320557, "expert": 0.29596126079559326 }
37,237
using UnityEngine; using System.Collections; using UnityEngine.UI; using com.ootii.Actors.AnimationControllers; using com.ootii.Cameras; using com.ootii.Input; using AIBehavior; using BreadcrumbAi; using UnityEngine.EventSystems; using JetBrains.Annotations; using System.Collections.Generic; //using NUnit.Framework; using com.ootii.Actors; using com.ootii.Actors.LifeCores; using BeautifyEffect; using FIMSpace.FLook; using com.ootii.Helpers; using com.ootii.Utilities.Debug; using com.ootii.Geometry; public class PlayerVitals : MonoBehaviour, IDataPersistence { public Transform debugTransform; public GameObject bulletProjectile; public Transform bulletParent; public GameObject currentHitObject; public List<GameObject> currentHitObjects = new List<GameObject>(); public float sphereRadius; public float knifeRadius; public float maxDistance; public LayerMask aimLayerMask; public LayerMask layerMask; public LayerMask obstacleMask; private Vector3 origin; private Vector3 direction; private float currentHitDistance; float damageTime = 1.0f; // every 1 second float currentDamageTime; float timePainSoundPlayed; public float reloadtime; public float fireRate = 0.8f; private float nextTimetoFire = 0f; private bool isReloading; private int bulletPerShot = 15; private float inaccuracyDistance = 0.1f; public Transform firePoint; public Transform knifeHitPoint; public ParticleSystem muzzleFlash; public GameObject TentacleFxParent; public ParticleSystem[] TentaclesFX; public GameObject muzzleLight; public GameObject shotgun; public int maxAmmo; public int currentAmmo; public int damage; public int headshotDamage; public int knifeDamage = 2; public float aimSpeed = 1f; public Slider staminaSlider; public float Stamina; public float minStamina; public float currentStamina; private float StaminaRegenTimer = 0.0f; private const float StaminaDecreasePerFrame = 3.5f; private const float StaminaIncreasePerFrame = 3.0f; private const float StaminaTimeToRegen = 1.5f; public Slider 
healthSlider; public float Health; public float currentHealth; public bool isInjured; public GameObject bloodPrefab; public bool isCrouching; public Slider flashlightSlider; public float maxFlashlight = 100; public float flashlightFallRate; public float currentFlashlight; public float flashlightDistance; public float flashlightRadius; public Light LightSource; public bool LightOn; public bool GlowStickON; public AudioClip TurnOn; public AudioClip TurnOff; public AudioClip Healsound; public AudioClip Aiming; public AudioClip FireShot; public AudioClip ReloadSound; public AudioClip GunEmpty; public AudioClip BulletReloadSound; public AudioClip GasolineSound; public AudioClip DeathSound; public AudioClip DeathTragicMusic; public AudioClip GameOverUIMusic; public AudioClip Heartbeat; public AudioClip FrySound; public AudioClip headshotSound; public AudioClip knifeSound; public AudioClip knifeHitSound; public AudioSource weaponAudioSource; public AudioSource HeartbeatAudioSource; [SerializeField] AudioClip[] Painsounds; [SerializeField] AudioClip[] Hitsounds; private AudioSource audioSource; public GameObject Inventory; public CameraController camRig; public UnityInputSource InputSource; public GameObject Player; public GameObject Gameover; public Animator m_Animator; public Animator BloodScreenFx; public Animator stealthFX; public bool isDead; public bool isSuiciding; public MotionController motionController; public ActorController actorController; protected MotionController mMotionController = null; public CameraShake CamShake; public bool isAiming; public bool isSettingTrap; public bool isSliding; public bool isRunning; public Transform Bullet1, Bullet2, Bullet3, Bullet4, Bullet5, Bullet6, Bullet7, Bullet8; public Transform AmmoText; public GameObject ShotgunUI; public GameObject PlayerVitalsUI; public GameObject QuestUI; public GameObject GasolineUI; public GameObject KnifeUI; public GameObject Reticle; public GameObject RedReticle; public GameObject GasCan; public 
GameObject gasolineTrap; public GameObject GlowStick; public GameObject CharacterHead; public GameObject FlashlightPoint; public Slider gasolineSlider; public float maxGasoline = 100; public float currentGasoline; public int trainDestroyed; public int deathCount; public int fatalHeadShot; [HideInInspector] public NotificationUI MsgUI; private BreadcrumbAi.Ai ai; private GameObject[] zombies; private Transform closestZombie; private DemoEnemyControls zombieHealth; private CrawlerControls crawlerControls; private SpreadFire spreadFire; private EasterEgg easterEgg; private Beautify beautify; private InventoryManager InvManager; private CameraFilterPack_AAA_BloodOnScreen camBlood; private FLookAnimator fLook; private FearSystem fearSystem; private bool tentacleIsPlaying; private string BaseLayer; private Quaternion _lookRotation; private Vector3 _direction; private bool _1stPerson; private Camera mainCamera; private void Awake() { if (menuScript1.PlayerSelected == 0) { Health = 100; maxAmmo = 5; damage = 2; headshotDamage = 4; maxDistance = 8; Stamina = 100; } if (menuScript1.PlayerSelected == 1) { //Has Shotgun //Normal Health (100) //Normal Stamina (100) //Normal Sanity (20) //Low Damage (1-3) Health = 100; maxAmmo = 5; damage = 1; headshotDamage = 3; maxDistance = 8; Stamina = 100; } if (menuScript1.PlayerSelected == 2) { //Has Pistol //High Health (125) //Low Stamina (75) //High Sanity (30) //Medium Damage (3-5) Health = 125; maxAmmo = 8; damage = 3; headshotDamage = 5; maxDistance = 15; Stamina = 75; } if (menuScript1.PlayerSelected == 3) { //Has Pistol //Low Health (50) //High Stamina (125) //Low Sanity (20) //High Damage (5-7) Health = 50; maxAmmo = 8; damage = 5; headshotDamage = 7; maxDistance = 6; Stamina = 125; } } void Start() { Cursor.visible = false; isCrouching = false; //currentAmmo = 0; AmmoText.GetComponent<Text>().text = currentAmmo.ToString() + " / " + maxAmmo; healthSlider.interactable = false; healthSlider.maxValue = Health; //currentHealth = 
Health; healthSlider.value = currentHealth; staminaSlider.interactable = false; staminaSlider.maxValue = Stamina; //currentStamina = Stamina; staminaSlider.value = currentStamina; flashlightSlider.interactable = false; flashlightSlider.maxValue = maxFlashlight; flashlightSlider.value = currentFlashlight; //currentFlashlight = maxFlashlight; LightOn = false; LightSource.intensity = 0f; isInjured = false; LightSource = GetComponentInChildren<Light>(); audioSource = GetComponent<AudioSource>(); beautify = FindObjectOfType(typeof(Beautify)) as Beautify; Inventory.SetActive(false); Gameover.SetActive(false); CameraController camRig = gameObject.GetComponent<CameraController>(); UnityInputSource InputSource = GetComponent<UnityInputSource>(); m_Animator = gameObject.GetComponent<Animator>(); m_Animator.SetBool("isDead", false); m_Animator.SetBool("isInjured", false); motionController = GetComponent<MotionController>(); mMotionController = gameObject.GetComponent<MotionController>(); CameraController lController = gameObject.GetComponent<CameraController>(); CamShake = gameObject.GetComponent<CameraShake>(); InvManager = FindObjectOfType(typeof(InventoryManager)) as InventoryManager; camBlood = Camera.main.GetComponent<CameraFilterPack_AAA_BloodOnScreen>(); fLook = gameObject.GetComponent<FLookAnimator>(); fearSystem = GetComponent<FearSystem>(); //tentacleIsPlaying = false; TentaclesFX = TentacleFxParent.GetComponentsInChildren<ParticleSystem>(); closestZombie = null; BaseLayer = "Base Layer"; _1stPerson = false; mainCamera = Camera.main; //set weapon for (int t = 0; t < InvManager.Items; t++) //Starting a loop in the slots of the inventory: { if (InvManager.Slots[t].IsTaken == true) //Checking if there's an item in this slot. { Item ItemScript = InvManager.Slots[t].Item.GetComponent<Item>(); //Getting the item script from the items inside the bag. 
if (ItemScript.Name == "Shotgun" || ItemScript.Name == "Colt 1911") //Checking if the type of the new item matches with another item already in the bag. { shotgun.SetActive(true); ShotgunUI.SetActive(true); } } } } void Update() { SwitchState(); StartCoroutine(ToggleCrouch()); StartCoroutine(CheckGasoline()); HandleTentacleFX(); HandleReload(); HandleAmmoUI(); HandleGunFire(); HandleStatsLock(); HandleFlashlight(); HandleStamina(); HandleFirstPerson(); //AIMING SOUND if (Input.GetMouseButtonDown(1) && shotgun.activeInHierarchy == true) { AudioClip clip = Aiming; weaponAudioSource.PlayOneShot(clip); } //HEALING CHEAT /*if(Input.GetKeyDown(KeyCode.H)) { MotionControllerMotion bandage = mMotionController.GetMotion(1, "IdleBandage"); mMotionController.ActivateMotion(bandage); currentHealth += 10; audioSource.PlayOneShot(Healsound); if (healthSlider.value >= 100) { currentHealth = Health; } }*/ } private void FixedUpdate() { HandleAiming(); HandleFlashlightPhysics(); } private void HandleTentacleFX() { //TENTACLES FX if (fearSystem.isInsane || isSuiciding) { if (!tentacleIsPlaying) { foreach (ParticleSystem tentacle in TentaclesFX) { tentacle.Play(); tentacle.loop = true; tentacleIsPlaying = true; } } } if (!fearSystem.isInsane) { if (tentacleIsPlaying) { foreach (ParticleSystem tentacle in TentaclesFX) { tentacleIsPlaying = false; tentacle.loop = false; } } } } private void HandleReload() { //GUN RELOAD if (shotgun.activeInHierarchy == true && !isSettingTrap && Input.GetKeyDown(KeyCode.R) || Input.GetKeyDown(KeyCode.JoystickButton3)) { if (!isReloading) { StartCoroutine(Reload()); } } } private void HandleAmmoUI() { //BULLET UI STRING AmmoText.GetComponent<Text>().text = currentAmmo.ToString() + " / " + maxAmmo; //BULLET UI SECTION if (currentAmmo > 0) { Bullet1.GetComponent<Image>().enabled = true; } else { Bullet1.GetComponent<Image>().enabled = false; } if (currentAmmo > 1) { Bullet2.GetComponent<Image>().enabled = true; } else { 
Bullet2.GetComponent<Image>().enabled = false; } if (currentAmmo > 2) { Bullet3.GetComponent<Image>().enabled = true; } else { Bullet3.GetComponent<Image>().enabled = false; } if (currentAmmo > 3) { Bullet4.GetComponent<Image>().enabled = true; } else { Bullet4.GetComponent<Image>().enabled = false; } if (currentAmmo > 4) { Bullet5.GetComponent<Image>().enabled = true; } else { Bullet5.GetComponent<Image>().enabled = false; } if (menuScript1.PlayerSelected == 2 || menuScript1.PlayerSelected == 3) { if (currentAmmo > 5) { Bullet6.GetComponent<Image>().enabled = true; } else { Bullet6.GetComponent<Image>().enabled = false; } if (currentAmmo > 6) { Bullet7.GetComponent<Image>().enabled = true; } else { Bullet7.GetComponent<Image>().enabled = false; } if (currentAmmo > 7) { Bullet8.GetComponent<Image>().enabled = true; } else { Bullet8.GetComponent<Image>().enabled = false; } } } private void HandleAiming() { //AIMING SECTION if (Input.GetMouseButton(1) && !m_Animator.GetCurrentAnimatorStateInfo(1).IsName("IdleBandage") && shotgun.activeInHierarchy == true && !isSettingTrap) { m_Animator.SetBool("isAiming", true); isAiming = true; fLook.enabled = false; if (mainCamera != null) { Ray ray = mainCamera.ViewportPointToRay(Vector3.one * 0.5f); //Debug.DrawRay (ray.origin, ray.direction * maxDistance, Color.magenta, 2f); RaycastHit hit; if (Physics.Raycast(ray.origin, ray.direction, out hit, Mathf.Infinity, layerMask)) { zombieHealth = hit.collider.GetComponentInParent<DemoEnemyControls>(); crawlerControls = hit.collider.GetComponentInParent<CrawlerControls>(); easterEgg = hit.collider.GetComponent<EasterEgg>(); bool gasoline = hit.transform.gameObject.CompareTag("Gas"); if (Physics.Raycast(ray.origin, ray.direction, out hit, Mathf.Infinity, obstacleMask)) //White Reticle If Obstacles { Reticle.SetActive(true); RedReticle.SetActive(false); } else if (zombieHealth != null || crawlerControls != null || easterEgg != null || gasoline) { //Red Reticle If Available Target 
RedReticle.SetActive(true); Reticle.SetActive(false); } } else { //White Reticle If Nothing Reticle.SetActive(true); RedReticle.SetActive(false); } } } else { m_Animator.SetBool("isAiming", false); isAiming = false; fLook.enabled = true; Reticle.SetActive(false); RedReticle.SetActive(false); } } private void HandleGunFire() { //GUN FIRE if (Input.GetMouseButtonDown(0) && m_Animator.GetBool("isAiming") == true && Time.time >= nextTimetoFire && !isReloading && shotgun.activeInHierarchy == true && !isSettingTrap) { if (currentAmmo >= 1) { nextTimetoFire = Time.time + 1f / fireRate; //StartCoroutine(Fire()); StartCoroutine(Shoot()); } else { MsgUI = FindObjectOfType(typeof(NotificationUI)) as NotificationUI; MsgUI.SendMsg("No Ammo! Press R to reload."); AudioClip clip = GunEmpty; weaponAudioSource.PlayOneShot(clip); } } } private void HandleStatsLock() { //STATS LOCK healthSlider.value = currentHealth; staminaSlider.value = currentStamina; flashlightSlider.value = currentFlashlight; if (currentHealth > Health) { currentHealth = Health; } if (currentStamina > Stamina) { currentStamina = Stamina; } if (currentHealth < minStamina) { currentStamina = minStamina; } //AMMO LOCK if (currentAmmo > maxAmmo) { currentAmmo = maxAmmo; } } private void HandleFlashlightPhysics() { //FLASHLIGHT DOUBLE WALKERS VISION if (LightOn) { currentHitObjects.Clear(); RaycastHit[] hits = new RaycastHit[1]; int numberOfHits = Physics.SphereCastNonAlloc(FlashlightPoint.transform.position, flashlightRadius, FlashlightPoint.transform.forward, hits, flashlightDistance, layerMask, QueryTriggerInteraction.UseGlobal); if (numberOfHits >= 1) { for (int i = 0; i < numberOfHits; i++) { currentHitObjects.Add(hits[i].transform.gameObject); currentHitDistance = hits[i].distance; foreach (GameObject zombie in currentHitObjects) { ai = hits[i].collider.GetComponentInParent<BreadcrumbAi.Ai>(); if (ai != null) { ai.visionDistance = ai.visionDistance * 2f; } } } } } } private void HandleFlashlight() { 
//FLASHLIGHT TOGGLE if (Input.GetKeyDown(KeyCode.F) || DPadButtons.IsLeft) { toggleFlashlight(); toggleFlashlightSFX(); if (LightOn) { LightOn = false; } else if (!LightOn && flashlightSlider.value >= 0) { LightOn = true; } } //FLASHLIGHT CONTROL SECTION if (LightOn && flashlightSlider.value >= 0) { currentFlashlight -= Time.deltaTime / flashlightFallRate; LightSource.intensity = 5f; } if (LightOn && flashlightSlider.value <= 75) { LightSource.intensity = 4f; } if (LightOn && flashlightSlider.value <= 50) { LightSource.intensity = 3; } if (LightOn && flashlightSlider.value <= 25) { LightSource.intensity = 1.5f; } if (LightOn && flashlightSlider.value <= 0) { flashlightSlider.value = 0; LightSource.intensity = 0f; currentFlashlight = 0f; } else if (flashlightSlider.value >= maxFlashlight) { flashlightSlider.value = maxFlashlight; } } private void HandleStamina() { //SHOW&HIDE STAMINA SLIDER if (currentStamina < Stamina) { staminaSlider.gameObject.SetActive(true); } else { staminaSlider.gameObject.SetActive(false); } //STAMINA CONTROL SECTION bool _isRunning = IsRunning(); bool _canRun = CanRun(); if (_isRunning && _canRun) { currentStamina = Mathf.Clamp(currentStamina - (StaminaDecreasePerFrame * Time.deltaTime), 0.0f, Stamina); StaminaRegenTimer = 0.0f; } else { if (currentStamina < Stamina) { if (StaminaRegenTimer >= StaminaTimeToRegen) currentStamina = Mathf.Clamp(currentStamina + (StaminaIncreasePerFrame * Time.deltaTime), 0.0f, Stamina); else StaminaRegenTimer += Time.deltaTime; } } if (!_canRun) { StartCoroutine(StopRunMotion()); } else { MotionControllerMotion walkOnlyMotion = mMotionController.GetMotion(0, "Walk Only Motion"); walkOnlyMotion._IsEnabled = false; } } private void HandleFirstPerson() { //FIRST PERSON FEATURE if (Input.GetKeyDown(KeyCode.Tab)) { if (!_1stPerson) { camRig.ActivateMotor(5); _1stPerson = true; motionController.SetAnimatorMotionPhase(0, 1130); motionController.ActivateMotion("Strafe", 0); } else if (_1stPerson) { 
camRig.ActivateMotor(6); _1stPerson = false; } } } IEnumerator StopRunMotion() { m_Animator.SetFloat("InputMagnitude", 0.5f, 0.1f, 0.5f); m_Animator.SetFloat("InputMagnitudeAvg", 0.5f, 1f, 0.5f); m_Animator.SetFloat("InputY", 0.5f, 1f, 0.5f); yield return new WaitForSeconds(0.1f); MotionControllerMotion walkOnlyMotion = mMotionController.GetMotion(0, "Walk Only Motion"); walkOnlyMotion.IsEnabled = true; } public bool IsRunning() { if (Input.GetKey(KeyCode.LeftShift) && Input.GetKey(KeyCode.W)) { return (true); } else { return (false); } } public bool CanRun() { if (currentStamina > 1f) { return (true); } else { return (false); } } /*IEnumerator Slide() { isSliding = true; AnimationClip slide = m_Animator.GetCurrentAnimatorStateInfo(0).IsName("Slide"); actorController.AddForce(actorController._Transform.forward * 10, m_Animator.GetCurrentAnimatorStateInfo(0).length); yield return new WaitForSeconds(1); Debug.Log("Sliding"); }*/ IEnumerator Knife() { isSliding = true; m_Animator.SetBool("isSliding", true); fLook.enabled = false; AudioClip clip = knifeSound; weaponAudioSource.PlayOneShot(clip); Collider[] hitEnemies = Physics.OverlapSphere(knifeHitPoint.position, knifeRadius, layerMask); foreach (Collider enemies in hitEnemies) { if (isSliding && enemies != null) { if (enemies.transform.tag == "Easter") { easterEgg = enemies.GetComponent<EasterEgg>(); easterEgg.DestroyTrain(); DestroyObject(enemies.transform.gameObject, 8); } if (enemies.transform.tag == "Body" || enemies.transform.tag == "Enemy") { zombieHealth = enemies.GetComponentInParent<DemoEnemyControls>(); crawlerControls = enemies.GetComponentInParent<CrawlerControls>(); if (zombieHealth != null || crawlerControls != null) { zombieHealth.GotHit(knifeDamage); crawlerControls.GotHit(knifeDamage); AudioClip hit = knifeHitSound; weaponAudioSource.PlayOneShot(hit); } } } } yield return new WaitForSeconds(1f); isSliding = false; m_Animator.SetBool("isSliding", false); //mMotionController.enabled = true; 
fLook.enabled = true; } public void KnifeHit() { /*RaycastHit hit; if (Physics.SphereCast(knifeHitPoint.transform.position, 4f, knifeHitPoint.transform.forward, out hit, 4f, layerMask, QueryTriggerInteraction.UseGlobal)) { currentHitObject = hit.transform.gameObject; currentHitDistance = hit.distance; zombieHealth = hit.collider.GetComponentInParent<DemoEnemyControls>(); easterEgg = hit.collider.GetComponent<EasterEgg>(); if (hit.collider.transform.tag == "Easter") { easterEgg.DestroyTrain(); DestroyObject(hit.transform.gameObject, 8); } if (zombieHealth != null) { if (hit.collider.transform.tag == "Body" || hit.collider.transform.tag == "Enemy") { zombieHealth.GotHit(damage); } } } else { currentHitDistance = maxDistance; currentHitObject = null; }*/ } public IEnumerator CharacterSuicide() { if (isSuiciding) { float timeElapsed = 1f; float lerpDuration = 14; float startValue = 0; float endValue = 1.6f; fearSystem.currentFear = fearSystem.maxFear; //Keeping Anomaly FX //DISABLING CAM&INPUT camRig.enabled = false; camRig._IsCollisionsEnabled = false; InputSource.IsEnabled = false; fLook.enabled = false; if (_1stPerson) { camRig.ActivateMotor(6); } //DISABLING UI LightSource.enabled = false; QuestUI.SetActive(false); ShotgunUI.SetActive(false); GasolineUI.SetActive(false); PlayerVitalsUI.SetActive(false); Inventory.SetActive(false); KnifeUI.SetActive(false); //ON KNEE ANIMATION motionController.SetAnimatorMotionPhase(0, 3400, true); Player.tag = "Dead"; m_Animator.SetBool("isSuiciding", true); isInjured = false; //Disabling Injured Sounds //STOP BLOOD m_Animator.SetBool("isInjured", false); BloodScreenFx.SetBool("50Health", false); BloodScreenFx.SetBool("100Health", true); yield return new WaitForSeconds(5f); //SUICIDE ANIMATION m_Animator.SetBool("isSuiciding2", true); yield return new WaitForSeconds(0.8f); //PLAY AUDIO audioSource.PlayOneShot(FireShot); audioSource.PlayOneShot(DeathTragicMusic); audioSource.PlayOneShot(headshotSound); if 
(HeartbeatAudioSource.isPlaying) { HeartbeatAudioSource.Stop(); } //HEADSHOT&FALLING ANIMATION CharacterHead.SetActive(false); m_Animator.SetBool("isSuiciding3", true); //NEW BLOOD FX while (timeElapsed < lerpDuration) { camBlood.Blood_On_Screen = Mathf.Lerp(startValue, endValue, timeElapsed / lerpDuration); timeElapsed += Time.deltaTime; yield return null; } camBlood.Blood_On_Screen = endValue; //ENABLING GAMEOVER UI + DEATH MUSIC + HEARTBEAT STOP Gameover.SetActive(true); Cursor.visible = true; audioSource.clip = GameOverUIMusic; audioSource.Play(); audioSource.loop = true; deathCount++; } } public IEnumerator CharacterDeath() { float timeElapsed = 1f; float lerpDuration = 14; float startValue = 0; float endValue = 1.6f; if (isDead) { //PLAY AUDIO audioSource.PlayOneShot(DeathSound); audioSource.PlayOneShot(DeathTragicMusic); //DISABLING CAM&INPUT camRig.enabled = false; camRig._IsCollisionsEnabled = false; InputSource.IsEnabled = false; fLook.enabled = false; if (_1stPerson) { camRig.ActivateMotor(6); } //DEATH ANIMATION motionController.SetAnimatorMotionPhase(0, 3375, true); Player.tag = "Dead"; m_Animator.SetBool("isDead", true); //DISABLING UI LightSource.enabled = false; QuestUI.SetActive(false); ShotgunUI.SetActive(false); GasolineUI.SetActive(false); PlayerVitalsUI.SetActive(false); Inventory.SetActive(false); KnifeUI.SetActive(false); //STOP BLOOD m_Animator.SetBool("isInjured", false); BloodScreenFx.SetBool("50Health", false); BloodScreenFx.SetBool("100Health", true); //SEND ZOMBIES ANIMATION zombies = GameObject.FindGameObjectsWithTag("Enemy"); closestZombie = null; foreach (GameObject zombie in zombies) { float curDistance; curDistance = Vector3.Distance(transform.position, zombie.transform.position); zombieHealth = zombie.gameObject.transform.GetComponent<DemoEnemyControls>(); crawlerControls = zombie.gameObject.transform.GetComponent<CrawlerControls>(); if (curDistance < 1f) { if (zombieHealth != null) { zombieHealth.EatPlayer(); 
zombieHealth.transform.LookAt(transform); transform.LookAt(zombieHealth.transform); } if (crawlerControls != null) { crawlerControls.EatPlayer(); crawlerControls.transform.LookAt(transform); transform.LookAt(crawlerControls.transform); } } } yield return closestZombie; //NEW BLOOD FX while (timeElapsed < lerpDuration) { camBlood.Blood_On_Screen = Mathf.Lerp(startValue, endValue, timeElapsed / lerpDuration); timeElapsed += Time.deltaTime; yield return null; } camBlood.Blood_On_Screen = endValue; //ENABLING GAMEOVER UI + DEATH MUSIC Gameover.SetActive(true); Cursor.visible = true; audioSource.clip = GameOverUIMusic; audioSource.Play(); audioSource.loop = true; deathCount++; if (HeartbeatAudioSource.isPlaying) { HeartbeatAudioSource.Stop(); } } } void SwitchState() { if (healthSlider.value <= 0 && !isDead) { isDead = true; beautify.sepiaIntensity = 1f; StartCoroutine(CharacterDeath()); } else if (healthSlider.value > 0 && healthSlider.value < (Health / 5.5f)) { beautify.sepiaIntensity = 1f; isInjured = true; } else if (healthSlider.value > (Health / 5.5f) && healthSlider.value < (Health / 4)) { beautify.sepiaIntensity = 0.90f; isInjured = true; } else if (healthSlider.value > (Health / 4) && healthSlider.value < (Health / 2)) { beautify.sepiaIntensity = 0.65f; isInjured = true; } else if (healthSlider.value > (Health / 2) && healthSlider.value < (Health / 1.3f)) { beautify.sepiaIntensity = 0.35f; isInjured = false; } else if (healthSlider.value > (Health / 1.3f)) { beautify.sepiaIntensity = 0f; isInjured = false; } if (isInjured && !isDead && !isSuiciding) { m_Animator.SetBool("isInjured", true); BloodScreenFx.SetBool("50Health", true); BloodScreenFx.SetBool("100Health", false); if (Time.time - timePainSoundPlayed < 4f) return; HeartbeatAudioSource.clip = Heartbeat; HeartbeatAudioSource.Play(); int n = Random.Range(1, Painsounds.Length); AudioClip painSounds = Painsounds[n]; if (!audioSource.isPlaying) { audioSource.PlayOneShot(painSounds); Painsounds[n] = 
Painsounds[0]; Painsounds[0] = painSounds; timePainSoundPlayed = Time.time; } } if (!isInjured || isDead || isSuiciding) { m_Animator.SetBool("isInjured", false); BloodScreenFx.SetBool("50Health", false); BloodScreenFx.SetBool("100Health", true); HeartbeatAudioSource.clip = Heartbeat; HeartbeatAudioSource.Stop(); } } IEnumerator ToggleCrouch() { for (int i = 0; i < m_Animator.layerCount; i++) { if (m_Animator.GetLayerName(i) == BaseLayer) { AnimatorStateInfo stateInfo = m_Animator.GetCurrentAnimatorStateInfo(i); if (stateInfo.IsName("Base Layer.Sneak v2-SM.Move Tree")) { isCrouching = true; stealthFX.SetBool("Stealth", true); yield return new WaitForSeconds(2); } if (stateInfo.IsName("Base Layer.WalkRunPivot v2-SM.Move Tree")) { isCrouching = false; stealthFX.SetBool("Stealth", false); yield return new WaitForSeconds(2); } } } yield return new WaitForSeconds(2); } public void Bleed(Quaternion rot) { GameObject blood = Instantiate(bloodPrefab, transform.position, rot) as GameObject; Destroy(blood, 5); } public void TakeDamage(float damage) { //PLAY PAIN SOUND int n = Random.Range(1, Hitsounds.Length); AudioClip clip = Hitsounds[n]; audioSource.PlayOneShot(clip); //DAMAGE currentHealth -= damage; //StartCoroutine(HitAnimation()); } public void OnTriggerStay(Collider other) { if (other.gameObject.CompareTag("Fire")) { currentHealth -= Time.deltaTime / 3; if (!audioSource.isPlaying) { AudioClip clip = FrySound; audioSource.PlayOneShot(clip); } } } public void OnTriggerEnter(Collider other) { /*if (isSliding && other != null) { Debug.Log("Knife Damage"); Debug.Log(other.name); if (other.transform.tag == "Easter") { easterEgg = other.GetComponent<EasterEgg>(); easterEgg.DestroyTrain(); DestroyObject(other.transform.gameObject, 8); } if (other.transform.tag == "Body" || other.transform.tag == "Enemy") { zombieHealth = other.GetComponentInParent<DemoEnemyControls>(); if (zombieHealth != null) { zombieHealth.GotHit(knifeDamage); Debug.Log(knifeDamage); } } }*/ } /*private 
IEnumerator HitAnimation() { //GET NEARBY ZOMBIE THAT ATTACKS zombies = GameObject.FindGameObjectsWithTag("Enemy"); foreach (GameObject zombie in zombies) { float curDistance; curDistance = Vector3.Distance(transform.position, zombie.transform.position); // Determine which direction to rotate towards Vector3 targetPosition = zombie.transform.position; Vector3 targetDirection = transform.position - zombie.transform.position; Quaternion desiredRotation = Quaternion.LookRotation(targetDirection, transform.up); //IF ONE IS NEAR if (curDistance < 1f) { //LOOK AT THIS ZOMBIE Debug.Log("OUCH"); Debug.DrawRay(transform.position, targetPosition * maxDistance, Color.blue, 2f); zombie.transform.LookAt(transform); transform.LookAt(targetPosition, transform.up); //DISABLING CAM&INPUT camRig.enabled = false; camRig._IsCollisionsEnabled = false; InputSource.IsEnabled = false; fLook.enabled = false; yield return new WaitForSeconds(1.5f); //ENABLING CAM&INPUT camRig.enabled = true; camRig._IsCollisionsEnabled = true; InputSource.IsEnabled = true; fLook.enabled = true; } } }*/ void toggleFlashlight() { if (LightOn) { LightSource.enabled = false; } else { LightSource.enabled = true; } } void toggleFlashlightSFX() { if (LightSource.enabled) { audioSource.clip = TurnOn; audioSource.Play(); } else { audioSource.clip = TurnOff; audioSource.Play(); } } private IEnumerator CheckGasoline() { //Toggle UI If Gas In Inventory for (int i = 0; i < InvManager.MaxItems; i++) { if (InvManager.Slots[i].IsTaken == true) //Checking if there's an item in this slot. { Item ItemScript = InvManager.Slots[i].Item.GetComponent<Item>(); //Getting the item script from the items inside the bag. //ItemScript.Name = PlayerPrefs.GetString("Name" + i.ToString()); //Loading the item's name. if (ItemScript.Name == "Gasoline" && !isDead) //Checking if the type of the new item matches with another item already in the bag. 
{ GasolineUI.SetActive(true); gasolineSlider.value = ItemScript.AmountPercent; if (gasolineSlider.value <= 0) { gasolineSlider.value = 0; } if (gasolineSlider.value >= ItemScript.AmountPercent) { gasolineSlider.value = ItemScript.AmountPercent; } if (Input.GetKeyDown(KeyCode.Q) && ItemScript.AmountPercent >= 20 && !isAiming && !isReloading) { ItemScript.AmountPercent -= 20; StartCoroutine(SetTrap()); } } else { GasolineUI.SetActive(false); } } } yield return new WaitForSeconds(1); } private IEnumerator SetTrap() { isSettingTrap = true; AudioClip clip = GasolineSound; audioSource.PlayOneShot(clip); shotgun.SetActive(false); GasCan.SetActive(true); m_Animator.SetBool("isSettingTrap", true); yield return new WaitForSeconds(0.01f); m_Animator.SetBool("isSettingTrap", false); mMotionController.enabled = false; camRig.EnableMotor<TransitionMotor>(false, "Targeting"); camRig.EnableMotor<TransitionMotor>(false, "Targeting In"); camRig.EnableMotor<TransitionMotor>(false, "Targeting Out"); GameObject gas = Instantiate(gasolineTrap, transform.position + transform.forward, Quaternion.Euler(-90, 0, 0)) as GameObject; yield return new WaitForSeconds(4); mMotionController.enabled = true; camRig.EnableMotor<TransitionMotor>(true, "Targeting"); camRig.EnableMotor<TransitionMotor>(true, "Targeting In"); camRig.EnableMotor<TransitionMotor>(true, "Targeting Out"); shotgun.SetActive(true); GasCan.SetActive(false); isSettingTrap = false; } private IEnumerator Fire() { //Debug.DrawRay(firePoint.transform.position, firePoint.transform.forward * 12, Color.red, 2f); //currentHitDistance = maxDistance; //currentHitObjects.Clear(); //RaycastHit[] hits = new RaycastHit[1]; Ray ray = Camera.main.ViewportPointToRay(Vector3.one * 0.5f); Debug.DrawRay(ray.origin, ray.direction * maxDistance, Color.green, 2f); RaycastHit hit; //if (Physics.SphereCast(firePoint.transform.position, sphereRadius, firePoint.transform.forward, out hit, maxDistance, layerMask, QueryTriggerInteraction.UseGlobal)) //if 
(Physics.SphereCast(ray.origin, sphereRadius, ray.direction, out hit, maxDistance, layerMask, QueryTriggerInteraction.UseGlobal)) //if (!Physics.Raycast(ray.origin, ray.direction, out hit, maxDistance, obstacleMask, QueryTriggerInteraction.UseGlobal)) //Fix to hide enemies behind obstacles if (Physics.Raycast(ray.origin, ray.direction, out hit, maxDistance, layerMask, QueryTriggerInteraction.UseGlobal)) //int numberOfHits = Physics.SphereCastNonAlloc(firePoint.position, sphereRadius, firePoint.transform.forward, hits, maxDistance, layerMask, QueryTriggerInteraction.UseGlobal); //for (int i = 0; i < numberOfHits; i++) { //currentHitObjects.Add(hits[i].transform.gameObject); //currentHitDistance = hits[i].distance; currentHitObject = hit.transform.gameObject; currentHitDistance = hit.distance; //zombieHealth = hits[i].collider.GetComponentInParent<DemoEnemyControls>(); zombieHealth = hit.collider.GetComponentInParent<DemoEnemyControls>(); crawlerControls = hit.collider.GetComponentInParent<CrawlerControls>(); spreadFire = hit.collider.GetComponent<SpreadFire>(); easterEgg = hit.collider.GetComponent<EasterEgg>(); if (hit.collider.transform.tag == "Gas") { spreadFire.SetFire(); DestroyObject(hit.transform.gameObject, 3); } if (hit.collider.transform.tag == "Easter") { trainDestroyed++; easterEgg.DestroyTrain(); DestroyObject(hit.transform.gameObject, 8); } //if (hits[i].collider.transform.tag == "Head") if (hit.collider.transform.tag == "Head") { if (zombieHealth != null) { zombieHealth.HeadShot(headshotDamage); } else if (crawlerControls != null) { crawlerControls.HeadShot(headshotDamage); } } //else if (hits[i].collider.transform.tag == "Body" || hits[i].collider.transform.tag == "Enemy") else if (hit.collider.transform.tag == "Body" || hit.collider.transform.tag == "Enemy") { if (zombieHealth != null) { zombieHealth.GotHit(damage); } else if (crawlerControls != null) { crawlerControls.GotHit(damage); } } } else { currentHitDistance = maxDistance; currentHitObject = 
null; } m_Animator.SetBool("isFire", true); MotionControllerMotion fire = mMotionController.GetMotion(1, "Fire"); mMotionController.ActivateMotion(fire); AudioClip clip = FireShot; weaponAudioSource.PlayOneShot(clip); muzzleFlash.Play(); muzzleLight.SetActive(true); yield return new WaitForSeconds(0.1f); muzzleLight.SetActive(false); currentAmmo--; CameraShake.Shake(0.25f, 0.08f); AmmoText.GetComponent<Text>().text = currentAmmo.ToString() + " / 5"; if (currentAmmo > 0) { Bullet1.GetComponent<Image>().enabled = true; } else { Bullet1.GetComponent<Image>().enabled = false; } if (currentAmmo > 1) { Bullet2.GetComponent<Image>().enabled = true; } else { Bullet2.GetComponent<Image>().enabled = false; } if (currentAmmo > 2) { Bullet3.GetComponent<Image>().enabled = true; } else { Bullet3.GetComponent<Image>().enabled = false; } if (currentAmmo > 3) { Bullet4.GetComponent<Image>().enabled = true; } else { Bullet4.GetComponent<Image>().enabled = false; } if (currentAmmo > 4) { Bullet5.GetComponent<Image>().enabled = true; } else { Bullet5.GetComponent<Image>().enabled = false; } yield return new WaitForSeconds(1 - .25f); m_Animator.SetBool("isFire", false); yield return new WaitForSeconds(2); } private IEnumerator Shoot() { if (menuScript1.PlayerSelected == 2 || menuScript1.PlayerSelected == 3) { /*GameObject bullet = GameObject.Instantiate(bulletProjectile, firePoint.position, Quaternion.identity, bulletParent); BulletController bulletController = bullet.GetComponent<BulletController>();*/ GameObject pooledBullet = ObjectPool.instance.GetPooledObjects(); if (pooledBullet == null) { yield break; } BulletController bulletController = pooledBullet.GetComponent<BulletController>(); RaycastHit hit; if (Physics.Raycast(mainCamera.transform.position, mainCamera.transform.forward, out hit, Mathf.Infinity, aimLayerMask)) { //debugTransform.position = hit.point; //bulletController.target = hit.point; //bulletController.hit = true; bulletController.target = hit.point; 
bulletController.hit = true; pooledBullet.transform.position = firePoint.position; pooledBullet.transform.LookAt(mainCamera.transform.forward); pooledBullet.SetActive(true); } else { //debugTransform.position = hit.point; //bulletController.target = mainCamera.transform.position + mainCamera.transform.forward * 25f; //bulletController.hit = false; bulletController.target = mainCamera.transform.position + mainCamera.transform.forward * 25f; bulletController.hit = false; pooledBullet.transform.position = firePoint.position; pooledBullet.transform.LookAt(mainCamera.transform.forward); pooledBullet.SetActive(true); } m_Animator.SetBool("isFire", true); MotionControllerMotion fire = mMotionController.GetMotion(1, "Fire"); mMotionController.ActivateMotion(fire); AudioClip clip = FireShot; weaponAudioSource.PlayOneShot(clip); muzzleFlash.Play(); muzzleLight.SetActive(true); yield return new WaitForSeconds(0.1f); muzzleLight.SetActive(false); currentAmmo--; CameraShake.Shake(0.25f, 0.08f); AmmoText.GetComponent<Text>().text = currentAmmo.ToString() + " / 5"; if (currentAmmo > 0) { Bullet1.GetComponent<Image>().enabled = true; } else { Bullet1.GetComponent<Image>().enabled = false; } if (currentAmmo > 1) { Bullet2.GetComponent<Image>().enabled = true; } else { Bullet2.GetComponent<Image>().enabled = false; } if (currentAmmo > 2) { Bullet3.GetComponent<Image>().enabled = true; } else { Bullet3.GetComponent<Image>().enabled = false; } if (currentAmmo > 3) { Bullet4.GetComponent<Image>().enabled = true; } else { Bullet4.GetComponent<Image>().enabled = false; } if (currentAmmo > 4) { Bullet5.GetComponent<Image>().enabled = true; } else { Bullet5.GetComponent<Image>().enabled = false; } yield return new WaitForSeconds(1 - .25f); m_Animator.SetBool("isFire", false); yield return new WaitForSeconds(2); } else if (menuScript1.PlayerSelected == 0 || menuScript1.PlayerSelected == 1) // IF ARTHUR { for (int i = 0; i < bulletPerShot; i++) { Vector3 shootDirection = 
mainCamera.transform.forward; shootDirection.x += Random.Range(-inaccuracyDistance * 2, inaccuracyDistance * 2); shootDirection.y += Random.Range(-inaccuracyDistance, inaccuracyDistance); GameObject pooledBullet = ObjectPool.instance.GetPooledObjects(); if (pooledBullet == null) { yield break; } BulletController bulletController = pooledBullet.GetComponent<BulletController>(); RaycastHit hit; if (Physics.Raycast(mainCamera.transform.position, shootDirection, out hit, Mathf.Infinity, aimLayerMask)) { //debugTransform.position = hit.point; bulletController.target = hit.point; bulletController.hit = true; pooledBullet.transform.position = firePoint.position; pooledBullet.transform.LookAt(shootDirection); pooledBullet.SetActive(true); } else { //debugTransform.position = hit.point; bulletController.target = mainCamera.transform.position + shootDirection; bulletController.hit = false; pooledBullet.transform.position = firePoint.position; pooledBullet.transform.LookAt(shootDirection); pooledBullet.SetActive(true); } } m_Animator.SetBool("isFire", true); MotionControllerMotion fire = mMotionController.GetMotion(1, "Fire"); mMotionController.ActivateMotion(fire); AudioClip clip = FireShot; weaponAudioSource.PlayOneShot(clip); muzzleFlash.Play(); muzzleLight.SetActive(true); yield return new WaitForSeconds(0.1f); muzzleLight.SetActive(false); currentAmmo--; CameraShake.Shake(0.25f, 0.08f); AmmoText.GetComponent<Text>().text = currentAmmo.ToString() + " / 5"; if (currentAmmo > 0) { Bullet1.GetComponent<Image>().enabled = true; } else { Bullet1.GetComponent<Image>().enabled = false; } if (currentAmmo > 1) { Bullet2.GetComponent<Image>().enabled = true; } else { Bullet2.GetComponent<Image>().enabled = false; } if (currentAmmo > 2) { Bullet3.GetComponent<Image>().enabled = true; } else { Bullet3.GetComponent<Image>().enabled = false; } if (currentAmmo > 3) { Bullet4.GetComponent<Image>().enabled = true; } else { Bullet4.GetComponent<Image>().enabled = false; } if (currentAmmo > 
4) { Bullet5.GetComponent<Image>().enabled = true; } else { Bullet5.GetComponent<Image>().enabled = false; } yield return new WaitForSeconds(1 - .25f); m_Animator.SetBool("isFire", false); yield return new WaitForSeconds(2); } } Vector3 GetShootingDirection() { Vector3 targetPos = Camera.main.transform.position + Camera.main.transform.forward * Mathf.Infinity; targetPos = new Vector3( targetPos.x = Random.Range(-inaccuracyDistance, inaccuracyDistance), targetPos.y = Random.Range(-inaccuracyDistance, inaccuracyDistance), targetPos.z = Random.Range(-inaccuracyDistance, inaccuracyDistance) ); direction = targetPos - Camera.main.transform.position; return direction.normalized; } private IEnumerator Reload() { for (int i = 0; i < InvManager.MaxItems; i++) { if (InvManager.Slots[i].IsTaken == true) //Checking if there's an item in this slot. { Item ItemScript = InvManager.Slots[i].Item.GetComponent<Item>(); //Getting the item script from the items inside the bag. //ItemScript.Name = PlayerPrefs.GetString("Name" + i.ToString()); //Loading the item's name. if (ItemScript.Name == "Ammunition" && ItemScript.Amount >= 1) //Checking if the type of the new item matches with another item already in the bag. 
{ isReloading = true; m_Animator.SetBool("isReloading", true); MotionControllerMotion reload = mMotionController.GetMotion(1, "Reload"); mMotionController.ActivateMotion(reload); int ammoToRemove = (maxAmmo - currentAmmo); if (ammoToRemove > ItemScript.Amount) { ammoToRemove = ItemScript.Amount; } InvManager.RemoveItem(InvManager.Slots[i].Item, (maxAmmo - currentAmmo)); for (int b = 0; b < ammoToRemove; b++) { weaponAudioSource.PlayOneShot(BulletReloadSound); currentAmmo++; yield return new WaitForSeconds(reloadtime); if (currentAmmo == maxAmmo) { AudioClip clip = ReloadSound; weaponAudioSource.PlayOneShot(clip); m_Animator.SetBool("isReloading", false); isReloading = false; } } m_Animator.SetBool("isReloading", false); isReloading = false; } } } } public IEnumerator ActivateGlowStick() { GlowStick.SetActive(true); GlowStickON = true; yield return new WaitForSeconds(30); GlowStick.SetActive(false); GlowStickON = false; } private void OnDrawGizmosSelected() { Gizmos.color = Color.yellow; if (knifeHitPoint == null) return; //Debug.DrawLine(FlashlightPoint.transform.position, FlashlightPoint.transform.position + FlashlightPoint.transform.forward * currentHitDistance); //Gizmos.DrawWireSphere(FlashlightPoint.transform.position + FlashlightPoint.transform.forward * currentHitDistance, flashlightRadius); Gizmos.DrawWireSphere(knifeHitPoint.transform.position, knifeRadius); } public void LoadData(GameData data) { this.currentHealth = data.health; this.currentStamina = data.stamina; this.currentFlashlight = data.flashlightAmount; this.currentAmmo = data.ammo; this.trainDestroyed = data.trainDestroyed; this.deathCount = data.deathCount; this.fatalHeadShot= data.fatalHeadShot; this.transform.position = data.playerPosition; } public void SaveData(GameData data) { data.health = this.currentHealth; data.stamina = this.currentStamina; data.flashlightAmount = this.currentFlashlight; data.ammo = this.currentAmmo; data.trainDestroyed = this.trainDestroyed; data.deathCount= 
this.deathCount; data.fatalHeadShot= this.fatalHeadShot; data.playerPosition = this.transform.position; } }
1cdbce19bff1a9a57b9028de131d1e67
{ "intermediate": 0.31067320704460144, "beginner": 0.492021381855011, "expert": 0.19730542600154877 }
37,238
Extract entities in the following text: "Once when I was six years old I saw a magnificent picture in a book, called True Stories from Nature, about the primeval forest. It was a picture of a boa constrictor in the act of swallowing an animal. Here is a copy of the drawing. In the book it said: "Boa constrictors swallow their prey whole, without chewing it. After that they are not able to move, and they sleep through the six months that they need for digestion." I pondered deeply, then, over the adventures of the jungle. And after some work with a colored pencil I succeeded in making my first drawing. My Drawing Number One. It looked something like this: I showed my masterpiece to the grown-ups, and asked them whether the drawing frightened them. But they answered: "Frighten? Why should any one be frightened by a hat?" My drawing was not a picture of a hat. It was a picture of a boa constrictor digesting an elephant. But since the grown-ups were not able to understand it, I made another drawing: I drew the inside of a boa constrictor, so that the grown-ups could see it clearly. They always need to have things explained. My Drawing Number Two looked like this: The grown-ups' response, this time, was to advise me to lay aside my drawings of boa constrictors, whether from the inside or the outside, and devote myself instead to geography, history, arithmetic, and grammar. That is why, at the age of six, I gave up what might have been a magnificent career as a painter. I had been disheartened by the failure of my Drawing Number One and my Drawing Number Two. Grown-ups never understand anything by themselves, and it is tiresome for children to be always and forever explaining things to them. So then I chose another profession, and learned to pilot airplanes. I have flown a little over all parts of the world; and it is true that geography has been very useful to me. At a glance I can distinguish China from Arizona. 
If one gets lost in the night, such knowledge is valuable. In the course of this life I have had a great many encounters with a great many people who have been concerned with matters of consequence. I have lived a great deal among grown-ups. I have seen them intimately, close at hand. And that hasn't much improved my opinion of them. Whenever I met one of them who seemed to me at all clear-sighted, I tried the experiment of showing him my Drawing Number One, which I have always kept. I would try to find out, so, if this was a person of true understanding. But, whoever it was, he, or she, would always say: "That is a hat." Then I would never talk to that person about boa constrictors, or primeval forests, or stars. I would bring myself down to his level. I would talk to him about bridge, and golf, and politics, and neckties. And the grown-up would be greatly pleased to have met such a sensible man."
8b816e119a06eb80bbfc8cdbb36a9c45
{ "intermediate": 0.31799590587615967, "beginner": 0.3705499470233917, "expert": 0.311454176902771 }
37,239
Write a regular expression that matches only the first four consecutive spaces
0121a4fca4624f667bf6ee9e5f7dfe72
{ "intermediate": 0.3959711790084839, "beginner": 0.2605806887149811, "expert": 0.3434481918811798 }
37,240
explain this code type T struct { val int } func (p *T) a() { p.val += 1 } func (p T) b() { p.val += 2 } func main() { x := T{5} x.a() x.b() fmt.Println(x.val) }
7019ffece62f5f5b8c0c4d9b59b04ca7
{ "intermediate": 0.2402641624212265, "beginner": 0.6268233060836792, "expert": 0.13291260600090027 }
37,241
quel syntaxe html si je souhaite rajouter dans le <div className='left-part-sub-string'> 3 boutons type checbkox (mode selectionné ou non selectionné) étant : 20 Juillet, 21 Juillet, 22 Juillet en sachant que ces boutton devront permettre une sauvegarde dans les cookies react : import { motion } from 'framer-motion'; import { useState } from 'react'; type Props = { id: number; title: string; price: number|string; nbTicket: number; }; export default function TicketCard(props: Props) { const [isOpen, setIsOpen] = useState(false); const [tickets, setTickets] = useState(props.nbTicket); const [rotation, setRotation] = useState(0); const handleTicketChange = (newTickets: number, event: React.MouseEvent) => { event.stopPropagation(); setTickets(newTickets); }; const contentVariants = { closed: { opacity: 0, height: 0, overflow: 'hidden', transition: { duration: 0.2, ease: 'easeInOut', when: 'afterChildren', }, }, open: { opacity: 1, height: typeof props.price === 'string' ? 140 : 80, transition: { duration: 0.2, ease: 'easeInOut', when: 'beforeChildren', } }, }; const cardVariants = { hidden: { opacity: 0, y: 50 }, visible: { opacity: 1, y: 0, transition: { type: 'spring', stiffness: 120, }, }, }; return ( <motion.div className="ticket-card" layout initial="hidden" animate="visible" variants={cardVariants} onClick={() => { setIsOpen(!isOpen); setRotation(rotation === 0 ? 90 : 0); }} > <div className="content"> <div className='left-part'> <h4>{props.title}</h4> <p>Les tickets ne sont pas remboursables.</p> <p>Dernière entrée à 11H.</p> </div> <div className='right-part'> <p>{props.price}€</p> <motion.div className="svg-container" animate={{ rotate: rotation }}> <svg xmlns="http://www.w3.org/2000/svg" width="13" height="20" viewBox="0 0 13 20" fill="none"> <path d="M2 18L10 10L2 2" stroke="#4E4E4E" strokeWidth="4" /> </svg> </motion.div> </div> </div> <motion.div className={`sub-menu ${typeof props.price === 'string' ? 
'large' : ''}`} variants={contentVariants} initial="closed" animate={isOpen ? "open" : "closed"} exit="closed" > {typeof props.price === 'string' ? ( <div className='left-part-sub-string'> <div className ="rect"> <img src="images/billet_pass1j.png" alt="Billet pass 1 jour" /> </div> <svg xmlns="http://www.w3.org/2000/svg" width="22" height="21" viewBox="0 0 22 21" fill="none"> <path d="M22 9.03848H14.6966L19.8599 4.10947L17.6953 2.04109L12.532 6.97007V0H9.46799V6.97007L4.30475 2.04109L2.13807 4.10947L7.30131 9.03848H0V11.9615H7.30131L2.13807 16.8906L4.30475 18.9589L9.46799 14.0299V21H12.532V14.0299L17.6953 18.9589L19.8599 16.8906L14.6966 11.9615H22V9.03848Z" fill="#FFD600"/> </svg> <p>x{tickets} Article(s) sélectionné(s)</p> </div> ) : ( <div className='left-part-sub'> <div className ="rect"> <img src="images/billet_pass1j.png" alt="Billet pass 1 jour" /> </div> <svg xmlns="http://www.w3.org/2000/svg" width="22" height="21" viewBox="0 0 22 21" fill="none"> <path d="M22 9.03848H14.6966L19.8599 4.10947L17.6953 2.04109L12.532 6.97007V0H9.46799V6.97007L4.30475 2.04109L2.13807 4.10947L7.30131 9.03848H0V11.9615H7.30131L2.13807 16.8906L4.30475 18.9589L9.46799 14.0299V21H12.532V14.0299L17.6953 18.9589L19.8599 16.8906L14.6966 11.9615H22V9.03848Z" fill="#FFD600"/> </svg> <p>x{tickets} Article(s) sélectionné(s)</p> </div> )} <div className="ticket-control"> <button onClick={(event) => handleTicketChange(Math.max(tickets - 1, 0), event)}>-</button> <span>{tickets}</span> <button className='sommeButton' onClick={(event) => handleTicketChange(tickets + 1, event)}>+</button> </div> </motion.div> </motion.div> ); }import Cookies from 'js-cookie'; // Définition des types pour les données que vous souhaitez stocker dans les cookies interface UserData { username: string; email: string; // ... 
autres champs } // Fonction pour définir un cookie export const setCookie = (key: string, value: any, options?: Cookies.CookieAttributes) => { Cookies.set(key, JSON.stringify(value), options); }; // Fonction pour récupérer un cookie export const getCookie = (key: string): any | null => { const cookie = Cookies.get(key); return cookie ? JSON.parse(cookie) : null; }; // Fonction pour supprimer un cookie export const removeCookie = (key: string) => { Cookies.remove(key); }; type userData= { pseudoUser: string; emailUser: string; idUser: number; } export const setUserCookie = (user: userData) => { setCookie("Connected", user, {expires: 7}); } export const isConnected = ():boolean => { const user = getCookie("Connected"); if (user){ return true; } return false; } export const getUserCookie = ():userData => { const user = getCookie("Connected"); return user; } export const removeUserCookie = () => { removeCookie("Connected"); }
02f7aac35c1b5400a7a969f696758bf3
{ "intermediate": 0.34254270792007446, "beginner": 0.45368415117263794, "expert": 0.2037731260061264 }
37,242
fait en sorte que je puisse récuper pour une date si elle est prise ou non avec la checbbox, cela servira après pour le boutton ajoutez au panier du billet ou il y'aura besoin des jours selectionné pour faire la requete axios a la bd : import { motion } from 'framer-motion'; import { useState } from 'react'; type Props = { id: number; title: string; price: number|string; nbTicket: number; }; export default function TicketCard(props: Props) { const [isOpen, setIsOpen] = useState(false); const [tickets, setTickets] = useState(props.nbTicket); const [rotation, setRotation] = useState(0); const handleTicketChange = (newTickets: number, event: React.MouseEvent) => { event.stopPropagation(); setTickets(newTickets); }; const contentVariants = { closed: { opacity: 0, height: 0, overflow: 'hidden', transition: { duration: 0.2, ease: 'easeInOut', when: 'afterChildren', }, }, open: { opacity: 1, height: typeof props.price === 'string' ? 140 : 80, transition: { duration: 0.2, ease: 'easeInOut', when: 'beforeChildren', } }, }; const cardVariants = { hidden: { opacity: 0, y: 50 }, visible: { opacity: 1, y: 0, transition: { type: 'spring', stiffness: 120, }, }, }; return ( <motion.div className="ticket-card" layout initial="hidden" animate="visible" variants={cardVariants} onClick={() => { setIsOpen(!isOpen); setRotation(rotation === 0 ? 90 : 0); }} > <div className="content"> <div className='left-part'> <h4>{props.title}</h4> <p>Les tickets ne sont pas remboursables.</p> <p>Dernière entrée à 11H.</p> </div> <div className='right-part'> <p>{props.price}€</p> <motion.div className="svg-container" animate={{ rotate: rotation }}> <svg xmlns="http://www.w3.org/2000/svg" width="13" height="20" viewBox="0 0 13 20" fill="none"> <path d="M2 18L10 10L2 2" stroke="#4E4E4E" strokeWidth="4" /> </svg> </motion.div> </div> </div> <motion.div className={`sub-menu ${typeof props.price === 'string' ? 'large' : ''}`} variants={contentVariants} initial="closed" animate={isOpen ? 
"open" : "closed"} exit="closed" > {typeof props.price === 'string' ? ( <div className='left-part-sub-string'> <label> <input type="checkbox" checked={'20Juillet} /> 20 Juillet </label> <label> <input type="checkbox" checked={'21Juillet'} /> 21 Juillet </label> <label> <input type="checkbox" checked={'22Juillet'} /> 22 Juillet </label> <div className ="rect"> <img src="images/billet_pass1j.png" alt="Billet pass 1 jour" /> </div> <svg xmlns="http://www.w3.org/2000/svg" width="22" height="21" viewBox="0 0 22 21" fill="none"> <path d="M22 9.03848H14.6966L19.8599 4.10947L17.6953 2.04109L12.532 6.97007V0H9.46799V6.97007L4.30475 2.04109L2.13807 4.10947L7.30131 9.03848H0V11.9615H7.30131L2.13807 16.8906L4.30475 18.9589L9.46799 14.0299V21H12.532V14.0299L17.6953 18.9589L19.8599 16.8906L14.6966 11.9615H22V9.03848Z" fill="#FFD600"/> </svg> <p>x{tickets} Article(s) sélectionné(s)</p> </div> ) : ( <div className='left-part-sub'> <div className ="rect"> <img src="images/billet_pass1j.png" alt="Billet pass 1 jour" /> </div> <svg xmlns="http://www.w3.org/2000/svg" width="22" height="21" viewBox="0 0 22 21" fill="none"> <path d="M22 9.03848H14.6966L19.8599 4.10947L17.6953 2.04109L12.532 6.97007V0H9.46799V6.97007L4.30475 2.04109L2.13807 4.10947L7.30131 9.03848H0V11.9615H7.30131L2.13807 16.8906L4.30475 18.9589L9.46799 14.0299V21H12.532V14.0299L17.6953 18.9589L19.8599 16.8906L14.6966 11.9615H22V9.03848Z" fill="#FFD600"/> </svg> <p>x{tickets} Article(s) sélectionné(s)</p> </div> )} <div className="ticket-control"> <button onClick={(event) => handleTicketChange(Math.max(tickets - 1, 0), event)}>-</button> <span>{tickets}</span> <button className='sommeButton' onClick={(event) => handleTicketChange(tickets + 1, event)}>+</button> </div> </motion.div> </motion.div> ); }
5d3b6837eebb493bcec5730313ec026d
{ "intermediate": 0.3511400520801544, "beginner": 0.4943366050720215, "expert": 0.15452337265014648 }
37,243
I need a weapon switching c# script for unity
600c36e9c5bf30f5f1d55327a7c0005b
{ "intermediate": 0.4481976330280304, "beginner": 0.32915765047073364, "expert": 0.22264467179775238 }
37,244
typedef struct Attribute_s { size_t size; unsigned int count; GLenum type; const void *pData; GLuint VBO; } Attribute_t; void createVAO(GLuint *pVAO, const unsigned int attributeCount, ...) { glGenVertexArrays(1, pVAO); glBindVertexArray(*pVAO); va_list attributeList; va_start(attributeList, attributeCount); int offset = 0; for (unsigned int i = 0; i < attributeCount; ++i) { Attribute_t attribute = va_arg(attributeList, Attribute_t); GLsizei stride = attribute.count * sizeof(attribute.type); offset += stride; glGenBuffers(1, &attribute.VBO); glBindBuffer(GL_ARRAY_BUFFER, attribute.VBO); glBufferData(GL_ARRAY_BUFFER, attribute.size, attribute.pData, GL_STATIC_DRAW); glVertexAttribPointer(i, attribute.count, attribute.type, GL_FALSE, stride, (void *) &offset); glEnableVertexAttribArray(i); } } GLuint cube_vao, cube_ebo; Attribute_t attributes[] = { {sizeof(positions), 3, GL_FLOAT, positions, 0}, {sizeof(normals), 3, GL_FLOAT, normals, 0}, {sizeof(texture_coordinates), 2, GL_FLOAT, texture_coordinates, 0} }; createVAO(&cube_vao, 3, attributes[0], attributes[1], attributes[2]); glGenBuffers(1, &cube_ebo); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, cube_ebo); glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), indices, GL_STATIC_DRAW); Can you spot the cause of undefined behaviour?
75edf4ecfeca7aac970c94cf92ad0397
{ "intermediate": 0.4446225166320801, "beginner": 0.258172869682312, "expert": 0.29720455408096313 }