Document
stringlengths
395
24.5k
Source
stringclasses
6 values
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using RobinhoodDesktop.MachineLearning;

namespace RobinhoodDesktop.Script
{
    /// <summary>
    /// Customize the default StockML class here. Could specify a different model architecture or data processing.
    /// </summary>
    class MLInstance : StockML
    {
    }

    class StockSessionMLScript
    {
        /// <summary>
        /// Converts processed stock data into the feature/label arrays consumed by the ML model.
        /// </summary>
        /// <param name="src">The processed stock data</param>
        /// <param name="dst">The array the features should be placed in</param>
        /// <param name="labels">The array the expected outputs should be placed in</param>
        /// <param name="dstIdx">Updated with the current position within the arrays</param>
        /// <param name="numFeatures">Output: the number of feature columns written per data point</param>
        /// <param name="numLabels">Output: the number of label columns written per data point</param>
        public static void PopulateData(StockDataSet<StockDataSink> src, float[,] dst, int[,] labels, ref int dstIdx, out int numFeatures, out int numLabels)
        {
            numFeatures = 0;
            numLabels = 0;

            // Copy one destination row per source point, stopping when either
            // the source or the destination array runs out of room.
            for (int srcIdx = 0; (srcIdx < src.Count) && (dstIdx < dst.GetLength(0)); srcIdx++, dstIdx++)
            {
                StockDataSink point = src[srcIdx];

                // Feature columns for this data point.
                numFeatures = 0;
                dst[dstIdx, numFeatures++] = point.Average10Min;

                // Label columns - the value the model is being trained to predict:
                // 1 when the price is above its 10-minute average, otherwise 0.
                numLabels = 0;
                labels[dstIdx, numLabels++] = ((point.Price > point.Average10Min) ? 1 : 0);
            }
        }

        /// <summary>
        /// Executes a run of processing the stock data.
        /// </summary>
        /// <param name="session">The session configuration</param>
        public static void Run(StockSession session)
        {
            // Create the stock processor.
            StockProcessor processor = new StockProcessor(session);

            // Probe a single data set to discover how many feature/label
            // columns each data point produces.
            int dstIdx = 0;
            int numDataPoints = 0;
            int numFeatures, numLabels;
            float[,] probeFeatures = new float[1, 1024];
            int[,] probeLabels = new int[1, 1];
            var probeSet = processor.DerivedData.First().Value[0];
            probeSet.Load(session);
            PopulateData(probeSet, probeFeatures, probeLabels, ref dstIdx, out numFeatures, out numLabels);

            // Count the total number of data points across the requested symbols.
            List<string> symbols = new List<string>() { "GNTX" };
            foreach (var symbol in symbols)
            {
                List<StockDataSet<StockDataSink>> dataSets;
                if (!processor.DerivedData.TryGetValue(symbol, out dataSets)) continue;
                for (int setIdx = 0; setIdx < dataSets.Count; setIdx++)
                {
                    dataSets[setIdx].Load(session);
                    numDataPoints += dataSets[setIdx].Count;
                }
            }

            // Allocate the feature and label arrays.
            float[,] features = new float[numDataPoints, numFeatures];
            int[,] labels = new int[numDataPoints, numLabels];

            // Load the data: fill the arrays with every point from every
            // data set belonging to the requested symbols.
            dstIdx = 0;
            foreach (var symbol in symbols)
            {
                List<StockDataSet<StockDataSink>> dataSets;
                if (!processor.DerivedData.TryGetValue(symbol, out dataSets)) continue;
                for (int setIdx = 0; setIdx < dataSets.Count; setIdx++)
                {
                    dataSets[setIdx].Load(session);
                    PopulateData(dataSets[setIdx], features, labels, ref dstIdx, out numFeatures, out numLabels);
                }
            }

            // Build, feed, and train the machine learning model.
            MLInstance model = new MLInstance();
            model.BuildFullyConnectedGraph(new int[] { numFeatures, numFeatures, numLabels });
            model.PrepareData(features, labels);
            model.Train();
        }
    }
}
STACK_EDU
[e2e] TCP improved closing strategies? William Allen Simpson william.allen.simpson at gmail.com Tue Aug 18 16:08:24 PDT 2009 David P. Reed wrote: > On 08/18/2009 12:56 PM, William Allen Simpson wrote: >> Thank you to everybody that provided substantive information and >> I look forward to David's information theoretic cryptology that crams >> several NS, and a half dozen digital signatures into 512 bytes over UDP, >> for the simplest secure case of NXDOMAIN. > I'd suggest that identity based encryption would provide a good starting > point the level of quote-security-endquote that is needed for DNS in the > grand practical scheme of things. But I'd probably be accused of being > unconnected with the simple reality of people who thing that > SOA/certificates/etc. being mumbled makes one an expert on "security". > What is the risk and what is the threat model, in one simple statement > that doesn't involve claims that DNS is somehow a "super secure" system > to start with? At the risk of alienating the others on the list by replying to this drivel, I'm also looking forward to the magic wand that instantaneously replaces DNS with another protocol and infrastructure. Moreover, folks that don't top post in multipart/alternative text/html, expecting others to do the work of fixing their formatting for readability. The thing that makes some of us more expert in security than others is the day to day experience of securing the "grand practical scheme of things." And the willingness to openly ask questions instead of hurling insults.... >> With several hundred thousand clients per minute using 65,000 ports. >> Through NAT boxen that pass *only* TCP and UDP, and don't randomize the >> Source port, and don't properly handle returning IP fragments. Etc. >> Back in the real world, that means TCP semantics, such as retransmission >> of lost segments. >> Or reinventing the wheel (segmentation and retransmission over UDP). 
> In a world where I check into a hotel that forcibly rapes my packets > starting with the ARP packets and going up through DHCP, so that when I > send a TCP/IP packet to www.google.com on port 80 it gets redirected to > a server that opendns.com (the world's "safest" DNS service) has been > told is to handle all google traffic (no NXDOMAIN here) which scrapes my > requests in order to sell my personal interests to a marketing company? We've all had that experience. Some of us even *predicted* it long ago (late NSFnet/early commercial Internet days). One of us even designed a secure ARP replacement, and proposed a shared-secret requirement for DHCP, with a requirement that every Internet end-to-end session be secured for authentication, confidentiality, and integrity. Other folks argued against it. The very idea that every system required at least 1 configured secret before installation was considered anathema. What about a thousand systems on the loading dock? One fine fellow had the unmitigated gall to state (paraphrased) the ethernet model works fine today, why change it.... I kept the recording for many years, as that person was forcibly made my "co-author" on Neighbor Discovery, who then removed all the security and hidden terminal (for wireless) discovery. Only now are they adding that back again (badly and inelegantly). Better late than never? N.B.: now ATT 2-wire cable boxes actually come pre-configured with a secret, printed right on the label. Finally! If only it was a UPC, so those could easily be scanned into a database for a thousand boxes on the > Get real. Security used to mean something other than employing security > consultants to work on subproblems as if they were the fundamental > issue, and crap up fundamentally weak systems with bells-and-whistles > like TCP magic close protocols that only add DDOS attack risks, while > fixing nothing important. Employing? You're being paid for this diatribe? Where were you during the crypto-wars? 
Where was *your* running code? Who was it that specified only 65K UDP ports? Who didn't randomize the Source port to prevent prediction, resulting in DNS cache poisoning? Who didn't even think about security for the Internet as a whole? (Compartment options are not security, they're bureaucracy.) More information about the end2end-interest
OPCFW_CODE
The idea is to have an ad system that is flexible, small, usable in many situations. But let us start at the beginning. In many stores you can see a big TV displaying ads of many kind, information regarding the store, weather information, etc. These are called ad systems (will be referred to in this article, at least) and they ought to bring information to the customers. Some of these are interactive, some are not. Flexibility of Raspberry Pi In the projects we did, we mostly used Raspberry Pi. This little thing is huge, when it comes to flexibility. It is a small computer that has its own Linux-based operating system. Thus, it has a lot of functionalities. Also, it is a microcontroller at the same time, having digital and analog inputs and outputs to handle all kinds of digital electronic communication. It is absolutely unbelievable how many things a Raspberry Pi can do. So, we use this instrument to do our work. It has a fullHD display output, HDMI output, some of them even have TV output. It has almost everything a computer has and even more regarding electronics. So, what kind of things can be done with a Raspberry Pi? We did a few projects that were interesting, so we will discuss those. Ad system in a hotel A hotel needed an information surface on every TV they had. They needed to display information about meals, opening hours, etc. Also, they needed the same thing to play a local radio. It sounded a bit hard at first, but we managed to come up with a simple solution. We used a Raspberry Pi. When the graphic interface starts, it loads a browser in full screen mode. In the browser there is a simple HTML and JS based website that displays the needed information, a slider, and also plays the radio using a JS plugin. The Raspberry also has a system that measures outside temperature, also displayed on the screen. On the TV output we have this displayed. 
Then this output is modulated together with the other TV stations, being the main channel loading when the TV is turned on. In a store we used almost the same technique. It is a full-screen website loaded that displays information about the latest offers, etc. This system communicates with another RaspberryPi that measures height, barometric pressure and temperature. This data is used to display a weather station among the sliders, showing how the weather is and will be. To create such an ad system, some basic things were used. We used the graphic interface of Raspbian (OS). We installed a webserver and added the PHP based Raspberry Sensor Server, a self-developed system for measuring and logging all kinds of sensor data. We created an HTML system to run in the browser as the main interface. These systems can be combined as wanted, can be installed in small places, even on the back of the TV itself for the sake of mobility. Using Raspberry Pi as base for an Ad system gives us huge freedom. There are so many possibilities to explore that there are almost no challenges that could not be done. Using easy and simple technologies combined gives us useful systems.
OPCFW_CODE
Why are vector fonts rasterised? Vector fonts are used more often than raster ones (because they're scalable), right? Why are they rasterised? What is the benefit of converting vector fonts into raster fonts? What are you talking about exactly? Where are vector fonts rasterized? Any example? http://en.wikipedia.org/wiki/Font_rasterization anything that needs to be displayed/printed would ultimately need to be rasterised as neither your display nor your printer displays things by means of vectors. rather, they display things pixel by pixel (pixel = picture element, for the matter), in a relatively raster manner so eventually one needs to have an approximation of what the perfect curve is like in terms of pixels, either black-or-white or with some grayscale dithering. i see. i meant, what is the benefit of raster fonts when compared to vector fonts?? @DrStrangeLove There is no benefit per se, but in order to be displayed on a pixel-based display you need to rasterize fonts, because an array of pixels can't just display a vector. Wasn't the main benefit of raster fonts the fact that they don't have to be rasterized? I'm not sure if I remember correctly, but I think that I've read somewhere that good rasterization of fonts was a major problem for some time. Good rasterization was a major problem -- of course they solve it by including pre-made raster font in lower point settings. with raster font the font (of course) is optimized already for all available point sizes. Nowadays with these subpixel rendering (and raster font included in vector fonts for small font size as well) good rasterization is considered a solved problem... A vector font has to go through a rasterization process in order to be displayed. This involves "drawing" the font on a bitmap within a section of memory (memory directly connected to the screen or other intermediate memory buffer) which takes time. 
With a rasterized font, it's been drawn already (rasterized fonts are just collections of bitmaps), so it just needs to be copied (and scaled) to its final destination. So rasterized fonts, especially ones that don't have to be scaled (i.e. being viewed at their "native" size), will render faster. The main benefit is that you can tweak the bitmaps very carefully for specific, often used sizes. At larger point sizes, say at 72pt, the curves are nice and round at a distance. But once you get down to smaller sizes, say 8 pt, you get really bad aliasing and you'd want to tune more specific type things like x height and kerning and so on. Apple fonts used to have, for a given typeface, bitmap (rasterized) fonts in specific sizes to make them look best on screen for the common sizes, and a vector font for all other sizes. So you'd look at the font package, see 10pt, 12pt, and a TrueType font package for the typeface. Finetuning the bitmap used to make a bit more sense when everyone had the same display and the same printer resolution. Not sure how useful it is now in the age of the Retina Display.
STACK_EXCHANGE
Welcome. This is "Quest Unscripted"-- --a vlog series on trending topics-- --and Quest solutions related to Active Directory-- --oh, and don't forget Azure AD. You are here because you have questions. We're here because we have answers. We will address questions we've received from customers-- --experiencing the same challenges as you-- --all with the goal of helping you confidently move-- --your Microsoft environment. We call the show "Quest Unscripted" because-- --except for this intro-- --nothing we say is scripted or rehearsed. And we're pretty sure you'll notice that right away. Hey, folks. Thanks for joining. We're going to be talking about what's new in Change Auditor 7.3. Are you guys excited about the new release? I'm excited about every release. OK, well, speaking of every release, how long has Change Auditor been out there, Bryan? So the Change Auditor brand name was taken from [INAUDIBLE] back in 2008. Quest had actually been working on technology-- it was called InTrust for AD for a while, going back to, I think, around 2004. In 2008, 2009, we blended them together and we retired the InTrust for AD capability. And now we have, with Change Auditor for AD today, the combination of both those products. So where have you seen it shift from where it was to where it is today? So originally, we were enriching the different data. It was from a compliance perspective. We wanted to be able to schedule reports, give an audit type needs. Over the last five-plus years, we're really focusing more on not just compliance, but really trying to get a lot more security capabilities out there because Active Directory has been constantly under attack. So we want to constantly or immediately be notified when something bad does happen. Got you. And speaking of the last few years, in the last few months, we've obviously added one great new team member to the mix. Anna Malec, welcome to Unscripted. And obviously, you've used Change Auditor as a customer. 
So speaking of what Bryan just talked about and where you are today, what's your perspective on working with Change Auditor from a customer standpoint versus now you basically deliver demos and presentations and you know the product in more details? What's the difference from where you sat before and what you are seeing in Change Auditor today? OK, so, I mean, the product overall I love from every perspective, especially because going through a security event in an organization, a Change Auditor became the most critical tool, at least to me. And so we use it a ton for alerting since then, and then also protections. The difference between then and now is what I learned is we can do DCShadow searches, DCSync, AdminSDHolder, and then also, from the other perspective, from the protection. I didn't know that I could protect the Active Directory dit file. And then also, which I found a really critical, was we can alert and protect the linking of gpos at the root of the domain or forest. So that would have been great to when I was a customer. Yeah, that's great. Well, I think that 7.3, guys, has also some new enhancements around the dit file protection. But let's park that for now. Ian, so what's new in Change Audit 7.3? There's a couple of brand new things. I think the first really new item is now publishing and forwarding events to Microsoft Sentinel. Sentinel is Microsoft's first foray into SIEM. It's hosted. So in prior versions, we could publish to Splunk or to our ArcSight, QRadar. We now have Sentinel on that list, as well. All the events that we're doing can be that we're doing on prem could be pushed up into Sentinel and added to that SIEM solution, as well. The development team is going to be doing a lot of work around modifying what we did with auditing SQL before. And they've just started to add in some previews of that with some new SQL auditing events that we've done. And again, we've improved security, the app, the Lava module. 
There's some PowerShell commands that you can use, as well. Some additional platform support you can run on server 22 now. Yeah, Bryan? Yeah, I just wanted to talk about security. Go ahead. Microsoft is deprecating RC4, a fantastic feature, as we can identify where RC4 is being used. So then you understand if something is going to break later on. Fortunately, we implemented this in our different labs beginning in November, it was middle of November that Microsoft, with one of the different updates, deprecated some different stuff. So I do have some audit events out there. Hopefully I never see RC4 again. But some organizations had to revert that back to understand where it's being used. Use that change-up for log-on capability. Identify that so you know what may actually be impacted. Yeah, good point, Bryan. Talk to us about what is administrator in the whole field. What's that all about? There's something else I love-- because you've heard me talk about BloodHound, Tier 0, all that other stuff. Then the searching under the who, we can actually search on who is administrator. There's a new is administrator, so we actually can look at all the activity from a person if that account has admin county claim one, they're nested domain admins, whatever, we can search off that instead of just particularly specifying domain admins, enterprise admins. There's help desk nested in domain admins. They'll show up in that different search now, as well. Yeah. And like Anna was talking about earlier, you obviously can set up an alert for that kind of search so that you are aware of these events as they happen. Ian, last question is, so for folks who want to upgrade to 7.3, what's the upgrade process look like? The upgrade process hasn't really changed. It's a direct upgrade from a prior version. I think we can even go back as far as some of 6.8, 6.9 versions. If you're on 7, it's just a straight upgrade. You will upgrade your coordinators. 
When you upgrade the first coordinator, it will also upgrade the database. So if you have more than one coordinator, you'll update the first one. That updates the database. Upgrade your second coordinator. Then you need to upgrade the clients. The client version has to match the coordinator version. So upgrade your stat clients. Upgrade your web clients. And then, if you need to or want to, you can go into the client at that point and push the updated agents out to the machines that support the newer agents, as well. So very straightforward. Great. Thank you guys so much. Anna, welcome to the team, and appreciate it. Talk to you later.
OPCFW_CODE
The following “polynomial-logarithmic” algebraic identity that one encounters on many occasions turns out to have a rather useful set of applications! POLYNOMIAL-LOGARITHMIC IDENTITY: If $p(x)$ is a polynomial of degree $n$ with roots $r_1, r_2, \ldots, r_n$, then $\frac{p'(x)}{p(x)} = \sum_{i=1}^{n} \frac{1}{x - r_i}$. PROOF: This one is left as a simple exercise. (Hint: Logarithms!) A nice application of the above identity is found in one of the exercises from the chapter titled Analysis (p120) in Proofs from the Book by Aigner, Ziegler and Hofmann. EXERCISE: Let $p(x)$ be a non-constant polynomial with only real zeros. Show that $p'(x)^2 \ge p(x)\,p''(x)$ for all $x \in \mathbb{R}$. SOLUTION: If $x$ is a zero of $p$, then the right hand side of the above inequality equals zero, and we are done. So, suppose $x$ is not a root of $p$. Then, differentiating the above identity w.r.t. $x$, we obtain $\frac{p''(x)\,p(x) - p'(x)^2}{p(x)^2} = -\sum_{i=1}^{n} \frac{1}{(x - r_i)^2} \le 0$, and we are done. It turns out that the above identity can also be used to prove the well-known Gauss-Lucas theorem. GAUSS-LUCAS: If $p$ is a non-constant polynomial, then the zeros of $p'$ lie in the convex hull of the roots of $p$. PROOF: See this. HISTORY: The well-known Russian author V.V. Prasolov in his book Polynomials offers a brief and interesting historical background of the theorem, in which he points out that Gauss’ original proof (in 1836) of a variant of the theorem was motivated by physical concepts, and it was only in 1874 that F. Lucas, a French Engineer, formulated and proved the above theorem. (Note that the Gauss-Lucas theorem can also be thought of as some sort of a generalization (at least, in spirit!) of Rolle’s theorem.) Even though I knew the aforesaid identity before, it was once again brought to my attention through a nice (and elementary) article, titled On an Algebraic Identity by Roberto Bosch Cabrera, available at Mathematical Reflections.
In particular, Cabrera offers a simple solution, based on an application of the given identity, to the following problem (posed in the 2006 4th issue of Mathematical Reflections), the solution to which had either escaped regular problem solvers or required knowledge of some tedious (albeit elementary) technique. PROBLEM: Evaluate the sum . (proposed by Dorin Andrica and Mihai Piticari.) SOLUTION: (Read Cabrera’s article.) There is yet another problem which has a nice solution based again on our beloved identity! PROBLEM: (Putnam A3/2005) Let be a polynomial of degree , all of whose zeros have absolute value 1 in the complex plane. Put . Show that all zeros of have absolute value 1. SOLUTION: (Again, read Cabrera’s article.)
OPCFW_CODE
Is there anyway, using VBA, to export a particular range of data that's been populated with a dynamic filter from my current workbook to a new workbook? My current workbook has 5 sheets. I want to export a range of data from Sheet1 to its own workbook. In Sheet1, I have Columns... I'm not sure if this is possible so would appreciate any responses. I'd like to be able to use a named range in an index but would like the named range to be dynamic. For example ideally the formula would be like =index(cell reference to dynamic array, etc). The problem is I don't... I am new to VBA. Currently I am using this, lastColumn is last column of worksheet. lastRow is last row of worksheet. It is working fine for: ActiveSheet.Range(Cells(1,1), Cells (lastRow,lastColumn)). RemoveDuplicates(Columns:=Array(1,2,3), Headers:=xlNo) - But it will work only for 3 columns... Let’s create a dynamic calendar in Excel the quick and easy way. We can use the clever new Excel function SEQUENCE in Excel 365 Insider. Mr. Excel (Bill Jelen) sparked this idea with this video and I enhanced the concept so that all we need is a date to make the calendar matarialise in the... I have two different but identical computers with Office 365 (up to date to 1911, monthly channel) + Win 10 (both x64). In one computer Excel dynamic arrays work as expected, but not in the other. Should this happen and is there any way to "fix" it? We can use Excel's new calc engine and dynamic array functions to create a loan amortisation schedule in a way that omits the need to create the correct number of rows or complex formulas to account for the correct number of rows. The formulas go in one row and they spill down to the correct... Hi, I was wondering if this is even a possibility or if anyone has done something like this where they could help me. I receive spreadsheets from our clients (with two columns) the amounts they need us to withdraw from their bank account weekly. and there's a rounding issue. Example, even... 
I'm looking for an Excel Formula and I hope that someone can help me out with this? I tried to draw a simplified version of the Excel below. I'm looking for a dynamic formula which returns the required training level for a specific Team. - For Team C - For Training 6 I am trying to use CountIf, but making the Range using a formula to find a row in another sheet. It is a little bit complex and I haven't found any examples or advice on how to do this correctly on any tutorials. I am hoping someone here could help me figure this out. Here are the two sheets... I am trying to calculate the Median & 75th Percentile for the range in array where the range differs in the length. For example, my data looks like the one given below (my data-set is almost 100,000 rows). Now I am trying to find the median and 75th percentile values of a combined set of 3... I am really new to this one.. and kinda got stuck. I am trying to add dynamic range and sum range to the SUMif Function.. but it's not working for me. when i break down the sum and the dynamic ranges its seems like they are working but not when i add them to the sumif. I have sales data for several weeks for over 100 products one sheet. Week numbers horizontally across the top, Product details vertically down the left. I have created a drop down list (separate sheet) of week numbers and I want to return the top 10 largest sales values depending on the week... I managed to run the script only once without error. But now I get a value error and it seems that the Redim is not resizing the arrays. Originally I used Redim Preserve but after the error I also tried to resize them at the initialization, but neither of them seems to work now. SO the issue I am having is that I need to create a macro that copies information into a new sheet and then runs a regression on that information. Sounds simple enough, right? 
So I recorded this Macro: ' h Macro ' Keyboard Shortcut: Ctrl+h I have a spreadsheet where one or two new rows are added every week. When I summarize my report I pick from row 2 to row n (last row value). The report has about 30 cells and currently I have to retype the last row number 30 times) I wanted to see if I can code something so that my report is... I've seen a number of threads that help with creating rank formulas that solve the duplicates problem by using COUNTIF statements, and others that allow for removal of blanks or #DIV/0! errors, but I'm using a set of data where cells are updating dynamically, and some of them might be numbers at... I'm not quite sure how to phrase my question, so I'll ask by way of an example. Let's say I have a dynamic data set like the one below. (Assume people are always adding, deleting, and changing entries.) <colgroup><col width="79" span="2" style="width:60pt">... I'm interested in creating a dynamic array. Essentially, I have a list of locations, and I would like that list of locations to populate into a subsequent list whenever someone places an "x" next to it. Like in the table below. Can someone please help? Place an "X"... Using VBA I am creating a dynamic array using the values present in a text file. Users are then prompted to type in a value via an input box; if the value entered in the input box is not in the array, an error message appears, prompting them to cancel or retry their entry.
OPCFW_CODE
Senior Full-Stack Engineer Senior Full-Stack Engineer – React/TypeScript & Node.js – Fully Remote – North America Only As a Senior Full-Stack Engineer you will be using React, TypeScript and Node.js to develop a gaming platform which allows millions of players to connect with each other under one umbrella. Players who play on different consoles will be able to play games with each other and they will be able to communicate on a specific platform. You will be using React and TypeScript on the Front-end for this development and Nodejs on the Back-end. You will be designing system architecture applications to enhance high level games programming, taking part in performance testing and systems integration. A Senior Full-Stack Engineer will apply Agile methodologies and perform weekly sprints and daily stand ups. Working as a Senior Full-Stack Developer you will be part of a talented cross functional team of 10 members who are Back-end Engineers, QA’s, Product Owner, and Tech Leads. A React/TypeScript Full-Stack Engineer will design and implement scalable web services, applications, and APIs with suitable data structures and design patterns. This business has been created by industry experts of the gaming world such as EA, LucasArts and Epic Games. These legends have a lot of experience building and operating live systems and they craft technology solutions making the next generation video games. They are a business that focuses a lot on having the best culture, always empowering employees where they can make decisions which can help and benefit millions of users to have the best experience. They focus a lot on equal opportunity, being innovative and inclusive. A good Senior Full-stack Engineer will earn up to $160,000 per annum. On top of this you will receive a full range of health benefits. North America only – Fully remote role with offices in Seattle, Montreal, Vancouver, and New York.
Why you should join You will be working in a team of World Class Engineers, and you will have the chance to work with the latest technologies. You will have the opportunity to take part in social and sport activities, with unlimited PTO, life insurance and excellent health schemes. If you love playing video games, this opportunity will allow you to have access to the best games ever on any console. You will receive loads of training and there is opportunity to progress yourself. What should you do now? If you are a React Full-Stack Engineer and this seems like a great job for you, click the link and send me your CV. If you are a React Full-Stack Engineer and have some questions, please call Maseeh at initi8 HQ. If you are not the React Full-Stack Engineer that we need, but know someone who is, why not refer them to us and earn a referral reward (10% of the fee) #BeTheAgent Share this job Java Tech Lead Hybrid - London - Sponsorship offeredLondon,United Kingdom £ 90000 per annum The Job Working as a Java technical lead your job will be to lead a team of five Java developers and act as the lead developer to translate Technical Spec Senior QAParis,France,Île-de-France € 90000 per annum The Job As a senior QA Engineer you will be coming into a challenging and exciting environment while working with likeminded colleagues. You will bri
OPCFW_CODE
M: Show HN: Packagecloud - Hosted Package Repositories - ice799 https://packagecloud.io R: bachmeier Who is supposed to be willing to pay for this? Is it intended as a replacement for apt on Debian-based systems? It says "apt, yum, and rubygems repositories without the headaches". If I'm already using apt repositories without headaches, is this not for me? R: ice799 Hi! Just for fun: man reprepro and search for 'corrupt' :) You can use all your normal tools to upgrade, install, and remove packages as you normally would. So, for, Debian-based systems, apt-get upgrade, install, remove, etc all work as you expect. We provide SSL, gpg, and fine-grained access control all out of the box. Fine- grained access control doesn't really exist with reprepro or createrepo or other tools and you'd have to build it yourself. Also, you don't need to worry about backups, the numerous bugs in all the repo creation tools, and we have chef and puppet modules to help deploy this across your infrastructure. We have support for multiple linux distributions in a single repo (quite a pain to deal with yourself) and best of all we also support pushing multiple versions of a single package to a repo -- something that reprepro does not support, but has been in progress for ~4 years [1]. [1] [https://bugs.debian.org/cgi- bin/bugreport.cgi?bug=570623](https://bugs.debian.org/cgi- bin/bugreport.cgi?bug=570623) R: catern I'm not a fan of this approach. Why did you build new, proprietary software instead of adding that feature to reprepo, for example? Why is packagecloud.io not giving back to the open source community when it's based on packaging tools developed for open source software? The fact that your software is proprietary means when you disappear, so does my infrastructure. I certainly don't like giving up basic user freedoms in the area of software packages, which has historically been open source. Edit: to be less snarky R: serverascode I am using it, works great. 
I tried a couple other similar services and they were slightly more complicated than I wanted--packagecloud is simple and straightforward. Also I like the clean design of the site. R: chubot Aren't most package repositories just static HTTP file servers? I know that apt repositories are. They just rsync directory trees and run Apache with a basic configuration to serve them. If so, it should be pretty straightforward to make an AMI or other machine image that does this (and I'd be surprised if it doesn't exist). R: toomuchtodo Some repos are even hosted right out of S3 statically (Amazon Linux and Ubuntu in AWS come to mind). host us-east-1.ec2.archive.ubuntu.com us-east-1.ec2.archive.ubuntu.com is an alias for us-east-1.ec2.archive.ubuntu.com.s3.amazonaws.com. us-east-1.ec2.archive.ubuntu.com.s3.amazonaws.com is an alias for s3-1-w.amazonaws.com. R: eropple I evaluated Gemfury ([https://gemfury.com/](https://gemfury.com/)) not too long ago for Localytics, and I'll say the same thing to you that I did them: support Maven repos and we will write large checks. R: Moto7451 Neat, on the Perl side of things there's also Stratopan. [https://stratopan.com](https://stratopan.com) I'm guessing there are a few other similar platform specific services out there as well. R: michaelmior Looks cool, but it would be really nice to see upfront what types of repositories are supported. It seems that it's currently Debian packages, RPMs, and gems, but I'm not sure because there wasn't a list anywhere. R: osivertsson Nice! I could see myself paying for such a service, if the need should arise in the future. R: ErikRogneby Puppet and Chef support are a nice touch! R: ice799 Thanks!
HACKER_NEWS
Are there any drawbacks to using a T-Mounted lens? I am looking to buy a mirror lens for the upcoming solar eclipse. I am using a Canon with an EF mount. Most of the lens options I see require a T-Mount adapter to fit on my canon. Are there any image quality/focus limitation/other drawbacks I should consider before purchasing one of these lenses? With other adapter types, I know there is the inherent problem of losing infinity focus. Does this same limitation apply to other adapters, including the T-Mount adapter? Theoretically speaking, using a T-mount lens to shoot an eclipse should create no such issues related to infinity focus/image quality/etc. Most DSLRs that shoot through a telescope are attached via T-mount adapters and, depending on the quality of the telescope's optics and the technique of the photographer, images taken using such an arrangement can be quite spectacular. Infinity focus is not an issue because the T-mount specification has a flange focal distance of 55mm. Most DSLRs and even older 35mm film camera systems have FFDs in the 40-48mm range. The adapter that screws onto the 42x0.75mm T-threads needs to be anywhere from 7mm to 15mm thick to fill in the distance between the camera and the lens. Thus no additional optics are needed to achieve infinity focus. Of course there is no communication between the lens and the camera so you need to be sure to use a camera that can shoot in Manual exposure mode without a lens detected. There are a few entry level cameras from various manufacturers that do not have such a capability. You'll also need to focus manually, but for astrophotography that's not much of an issue since 1) Most astrophotography is done at infinity focus and 2) Most cameras can't AF on objects in the night sky other than the moon. Depending on what type of solar filter you use to image the sun, even if you had an AF capable lens it might or might not AF correctly through the filter. 
In any case careful manual focus, typically using magnified Live View, will usually get better results with astrophotography. In the real world, though, things are a bit different. Most mirror lenses designed to be primarily a camera lens that have a T-thread at the rear of the lens for use with cameras via a T-mount adapter aren't that good optically. This is particularly the case with the cheapest ones. You're also limited to a single aperture with almost all such lenses, which may reduce your options regarding ISO and shutter time. Note that cheap refractive lenses intended primarily as photographic lenses that have T-mount connectors are even worse optically than their mirrored counterparts. Higher quality mirror lenses tend to be made with specific lens mount connectors permanently attached to the rear of the lens, rather than T-threads for a T-mount adapter. No problems with infinity focus with T mount lenses. The T mount was designed specifically to let you use the same T mount lens with different camera brands by using the appropriate T Ring for the camera - the depth of the T ring compensates for the different camera flange to film/sensor distances. Big drawback with the T mount is that there's no mechanical or electronic aperture coupling available - which means that a lot of entry level DSLRs won't meter with the lens at all (higher end Nikons that didn't need the electronic coupling worked OK; not sure what the situation is for Canon bodies). Also, native Canon lenses can have a slightly bigger clear aperture than the T mount diameter - so the native lenses can go to slightly faster max aperture values than a T mount equivalent, but that's not usually an issue for the sort of lenses that come in T mount fittings these days. 
I've used five different mirror lenses over the years: -An old Meade 1000mm f11 camera lens/spotting scope (basically, this was a camera lens focusing style mak scope that worked either as a camera lens (with an extension tube to a T ring) or a telescope (with a star diagonal). -An old Tamron 500mm / f8 mirror lens with an adaptall mount (similar idea to the T mount, but extended so that (with other lenses) you had basic mechanical aperture coupling. The mirror lens was fixed at f8, though A Minolta 300mm AF mirror lens for their APS SLR Old and newer versions of the Nikon 500/f8 mirror lens. Of those, the AF on the Minolta lens worked fine - I'm surprised there aren't more AF mirrors around. The Tamron and the newer Nikon also made nice butterfly lenses - they both focused close enough that you could get a butterfly to a decent size in the frame from far enough away that you didn't frighten it off. The older Nikon had a much longer minimum focus distance, which is why I eventually switched to the newer version. Quality on the Nikons was higher than the Tamron, but the Tamron was pretty good - and packed down smaller than the Nikon (The lens hood reversed around the lens for transport, which gave you a shorter body). The 1000mm Meade lens worked pretty well, on a tripod for wildlife shots or handheld in bright lighting for things like motor racing shots. But it benefitted from improvising a lens hood, since the front element was basically at the front of the tube. Usable without, but contrast improved with the hood. These days, I usually use my 80-400mm Nikon image stabilised zoom - it's not quite as long as the 500mm mirrors were, but the AF and VR capability make up for that, and the close focus at 400mm also makes for a nice butterfly/ large insect lens. Or if it's worth the hassle of carting a heavy manual focus setup along, then I can use my 106mm astrograph refractor at 530mm f5 or 840mm f8, which gives extremely high quality results. 
But by the time you add a suitable tripod and mount, it's bulky and heavy enough that it's really restricted to specially planned trips - not something you'd normally walk around with. Of the various mirror lenses you mention, it appears only the first (1000mm Meade) is a T-mount lens. Is this correct? Yes. The Nikons were Nikon mount, and the Tamron 500mm used their adaptall mount - a similar idea to their original T mount (That's where the "T" comes from ) but extended to allow mechanical aperture control and meter coupling. Since the T mount has no mechanical or electronic linkages for the aperture, it's restricted to fixed aperture lenses (like mirror ones) or preset aperture ones (where there's one ring to set the aperture, and a second to open/close it so you can fully open the aperture to focus, or close it down to the preset value to take the picture or check depth of field. T mount adapters and T rings (and sometimes so-called "wide T" versions that use a larger diameter (some are 48mm vs the normal T mount 42mm diameter for more clear aperture) are also commonly used to connect cameras to astronomical telescopes - either by having a T thread on the scope focuser (or via a threaded adapter to the T thread), with a T ring attached, or via a 2" eyepiece barrel to T thread adapter with a T ring (Though you can also get one piece 2" barrel to camera mount adapters, which may allow more clear aperture than the 42mm diameter T threaded tube). Yes. But those telescopes with 48mm wide T-mounts, or even the higher quality instruments with 42mm T-mounts, are primarily intended for use as astronomical telescopes, not as all purpose camera lenses. Most cheap mirror lenses with T-mount adapters are the latter, not the former.
STACK_EXCHANGE
New names can be registered (inscribed for the first time) as ordinals with only a Bitcoin transaction. This process is open to anyone that can write to Bitcoin. To register a new name within the Sats Name System simply inscribe an ordinal containing your desired name. Here is an example. Advanced registration allows users to add additional data to a name using JSON5 syntax. The format is optional for registration. It will be required for update operations. Here is an example. Protocol: Helps other systems identify and process SNS names Operation: Helps indexers compute name state Name: Your desired name. Including .sats is required. These apply to both registration types: - Only one name per ordinal - Any UTF-8 character is valid - Capitalization does not matter. All names will be registered as lowercase. - No spaces are permitted within a name These instructions explain the protocol logic and instructions for extracting and validating names so you can properly index them and determine the first instance of each name. These instructions assume you have access to the entire ordinals data structure with basic details like content type, ID, as well as the content. To materialize Sats Names correctly you must index all ordinals back to inscription number 159710. Validate that the inscription's content type is any of these: text as the raw text contents of an inscription. For example, text would be the raw contents returned at this Ordinals.com URL. The first instance of each name is the only valid instance of the name in the Sats Name System. We determine which name is first by inscription number. The following instructions will tell you how to extract and validate the name so you can compare inscription number and determine the first. - 1.Validate if the text is valid JSON according to the JSON5 standard. To check if the inscription is JSON, try parsing the text. If an error is thrown (invalid JSON), skip to the "plain text" instructions listed under Validate Names. 
- 2.Check that the JSON has the required properties: nameis present and is of type nameis extracted, follow the steps proceed to the Validate names steps. Trailing commas are valid in JSON5. New lines inside values are not valid. - 1.Turn the string into lowercase - 2.Delete everything after the first whitespace or newline ( - 3.Trim all whitespace and newlines - 4.Validate that there is only one period ( .) in the name - 5.Validate that the string ends with If you have a valid .sats name compare all other instances of the same name. The first instance of the name according to inscription number is the only valid Sats Name, or "first" of this name. Make sure you convert all queries to lowercase, otherwise you might inadvertently show name as available when they are registered. Make sure your API returns a decoded version of the UTF-8 name. Scammers hide hidden characters inside names and by including this information clients can flag deceptive names. Queries for names that include ?will fail unless you convert them to URL encoding reference first.
OPCFW_CODE
Multi-step protocol for HTVS¶ For high-throughput virtual screening (HTVS) applications, where computing performance is important, the recommended RxDock protocol is to limit the search space (i.e. rigid receptor), apply the grid-based scoring function and/or to use a multi-step protocol to stop sampling of poor scorers as soon as possible. Using a multi-step protocol for the DUD system COMT, the computational time can be reduced by 7.5-fold without affecting performance by: Running 5 docking runs for all ligands; ligands achieving a score of -22 or lower run 10 further runs; for those ligands achieving a score of -25 or lower, continue up to 50 runs. The optimal protocol is specific for each particular system and parameter-set, but can be identified with a purpose-built script (see the Reference guide, section Here you will find a tutorial to show you how to create and run a multi-step protocol for a HTVS campaign. Step 1: Create the multi-step protocol¶ These are the instructions for running rbhtfinder: 1st) exhaustive docking of a small representative part of the whole library. 2nd) Store the result of sdreport -t over that exhaustive dock. in file that will be the input of this script. 3rd) rbhtfinder <sdreport_file> <output_file> <thr1max> <thr1min> <ns1> <ns2> <ns1> and <ns2> are the number of steps in stage 1 and in stage 2. If not present, the default values are 5 and 15 <thrmax> and <thrmin> setup the range of thresholds that will be simulated in stage 1. The threshold of stage 2 depends on the value of the threshold of stage 1. An input of -22 -24 will try protocols: 5 -22 15 -27 5 -22 15 -28 5 -22 15 -29 5 -23 15 -28 5 -23 15 -29 5 -23 15 -30 5 -24 15 -29 5 -24 15 -30 5 -24 15 -31 Output of the program is a 7 column values. First column represents the time. This is a percentage of the time it would take to do the docking in exhaustive mode, i.e. docking each ligand 100 times. Anything above 12 is too long. Second column is the first percentage. 
Percentage of ligands that pass the first stage. Third column is the second percentage. Percentage of ligands that pass the second stage. The four last columns represent the protocol. All the protocols tried are written at the end. The ones for which time is less than 12%, perc1 is less than 30% and perc2 is less than 5% but bigger than 1% will have a series of *** after, to indicate they are good choices WARNING! This is a simulation based in a small set. The numbers are an indication, not factual values. Step 1, substep 1: Exhaustive docking¶ Hence, as stated, the first step is to run an exhaustive docking of a representative part of the whole desired library to dock. For RxDock, exhaustive docking means doing 100 runs for each ligand, whereas standard docking means 50 runs for each ligand: $ rbdock -i INPUT.sd -o OUTPUT -r PRMFILE.prm -p dock.prm -n 100 Step 1, substep 2: Once the exhaustive docking has finished, the results have to be saved in a single file and the output of the script sdreport -t will be used as $ sdreport -t OUTPUT.sd > sdreport_results.txt Step 1, substep 3: The last step is to run the rbhtfinder script (download $ rbhtfinder sdreport_results.txt htvs_protocol.txt -10 -20 7 25 Which will result in a file called The parameters are explained in the script instructions. They are not always the same and as they depend on the system, you will probably have to play a little with different values in order to obtain good parameters sets (marked with *** in the output). This will happen when time is less than 12%, perc1 (number of ligands that pass the first filter) is less than 30% and perc2 (number of ligands that pass the second filter) is less than 5% but bigger than 1%. Step 2: Run docking with the multi-step protocol¶ The script finished with two good parameters sets: TIME PERC1 PERC2 N1 THR1 N2 THR2 [...] 11.928, 27.461, 3.207, 7, -12, 25, -17 *** [...] 10.508, 18.773, 1.511, 7, -13, 25, -18 *** [...] 
These parameters have to be adapted to a file with the HTVS protocol format that RxDock understands. A template file looks as follows ( are the parameters found above): 3 if - <THR1> SCORE.INTER 1.0 if - SCORE.NRUNS <N1-1> 0.0 -1.0, if - <THR2> SCORE.INTER 1.0 if - SCORE.NRUNS <N2-1> 0.0 -1.0, if - SCORE.NRUNS 49 0.0 -1.0, 1 - SCORE.INTER -10, It is divided in 2 sections, Running Filters and Writing Filters (defined by the lines with one number). The first line (the number 3) indicates the number of lines in the Running Filters: The first filter is defined as follows: if the number of runs reaches N1and the score is lower than THR1, continue to filter 2, else stop with that ligand and go to the next one. The second filter is defined similar to the first one: if the number of runs reaches N2and the score is lower than THR2, continue to filter 3, else stop and go to the next ligand. If a ligand has passed the first two filters, continue up to 50 runs. The fifth line (the number 1 after the three Running Filters) indicates the number of lines in the Writing Filters: Only print out all those poses where SCORE.INTERis lower than -10 (for avoiding excessive printing). For the parameters obtained in the first Section of this tutorial (first line ***), we will have to generate a file as follows: 3 if - -12 SCORE.INTER 1.0 if - SCORE.NRUNS 6 0.0 -1.0, if - -17 SCORE.INTER 1.0 if - SCORE.NRUNS 24 0.0 -1.0, if - SCORE.NRUNS 49 0.0 -1.0, 1 - SCORE.INTER -10, Please note that the parameters N2 are 7 and 25 but we write 6 and 24, respectively, as stated in the template. Finally, run RxDock changing the flag -n XX for $ rbdock -i INPUT.sd -o OUTPUT -r PRMFILE.prm -p dock.prm -t PROTOCOLFILE.txt
OPCFW_CODE
What does SetWindowsHookEx do? The SetWindowsHookEx function will install the hook routine into the hook chain of the victim.exe process, which will be invoked whenever certain event is triggered. What is Wh_keyboard_ll? WH_KEYBOARD_LL. The WH_KEYBOARD_LL hook enables you to monitor keyboard input events about to be posted in a thread input queue. What is CallNextHookEx? CallNextHookEx calls the next hook in the chain. Calling CallNextHookEx is optional, but it is highly recommended; otherwise, other applications that have installed hooks will not receive hook notifications and may behave incorrectly as a result. What are window hooks? A hook is a point in the system message-handling mechanism where an application can install a subroutine to monitor the message traffic in the system and process certain types of messages before they reach the target window procedure. What is a hook C++? Normally when you “hook” into the DLL, you actually put your function in front of the one in the DLL that gets called, so your function gets called instead. You then capture whatever you want, call the other function, capture its return values and whatever else, then return to the original caller. What is easy hook? EasyHook makes it possible to extend (via hooking) unmanaged code APIs with pure managed functions, from within a fully managed environment on 32- or 64-bit Windows XP SP2, Windows Vista x64, Windows Server 2008 x64, Windows 7, Windows 8.1, and Windows 10. EasyHook supports injecting assemblies built for . What are hooks used for? A hook is a hand tool used for securing and moving loads. It consists of a round wooden handle with a strong metal hook about 8″ long projecting at a right angle from the center of the handle. Is DLL injector safe? Simple Injector is thread-safe and its lock-free design allows it to scale linearly with the number of available processors and threads. 
You will find the speed of resolving an object graph comparable to hard-wired object instantiation. What is the use of setwindowshookex in Windows? SetWindowsHookEx function is very useful for retrieving event messages from a user regardless of what application is active. It’s not necessary for a Windows application to have a window or a console active. Windows application can run freely in the background without any appearance available. Where is the callback function in setwindowshookex? SetWindowsHookEx call’s a callback function if a user typed something on the keyboard or moved the mouse. This callback function is usually located in a DLL and it’s implemented by a programmer from scratch. An additional program needs to load the DLL, register the hook and set it up globally before hooking other applications. When to release a hook procedure in Win32? Release a hook procedure as soon as your application no longer needs it. You can release a global hook procedure by using UnhookWindowsHookEx, but this function does not free the DLL containing the hook procedure. Where are low level hooks called in Stack Overflow? Low-level hook procedures are called on the thread that installed the hook. Low-level hooks do not require that the hook procedure be implemented in a DLL. Thanks for contributing an answer to Stack Overflow!
OPCFW_CODE
Update a column value if two specific conditions are met in MySQL I'm very much a newbie to programming. I am trying to update my table column labelled 'bonus' with a new value of 505.99 if two conditions are met: if the givenname is Mary and their bonus is currently 155.99, or if their occupation is a Model and their bonus is also currently 155.99. 7 rows should be updated but only 1 is being updated. The query looks like it should work to me so wondering what I am missing? Looking for any pointers! Thanks in advance UPDATE customers SET bonus = 505.99 WHERE occupation = 'Model' AND bonus = 155.99 OR givenname = 'Mary' AND bonus = 155.99; Can you reproduce your issue with sample data in a fiddle? Which one is updated? do you have an issue with case sensitivity? please publish sample date and table definition as text or to fiddle - I cannot reproduce your issue https://dbfiddle.uk/3gNMSgWR Can you try to use parentheses like these? UPDATE customers SET bonus = 505.99 WHERE (occupation = 'Model' AND bonus = 155.99) OR (givenname = 'Mary' AND bonus = 155.99); How is this different? @forpas Here are some good examples and explanations: https://stackoverflow.com/questions/16122695/are-brackets-in-the-where-clause-standard-sql Besides making it more readable they also can affect the evaluation order, similar to normal math. 5 + 5 * 2 = 5 + 10 = 15 is different than (5 + 5) * 2 = 10 * 2 = 20. No, the parentheses do not affect the evaluation order because a and b or c and d is evaluated as (a and b) or (c and d). Yeah you're right about the order of evaluation in this case. Nevertheless it adds more readability and clarity IMHO. Does it solve any problem? You should use AND and OR conditions properly when you use those simultaneously. Let us build your query : As we know we are going to set value wherever following holds true. 
Either occupation = 'Model' OR givenname = 'Mary' This should be written with OR together bonus = 155.99 : This we can add separately using AND in the select query. So; the correct condition to use is (occupation = 'Model' OR givenname = 'Mary') and bonus = 155.99; We can re-write the above query as : UPDATE customers SET bonus = 505.99 WHERE bonus = 155.99 AND (occupation = 'Model' OR givenname = 'Mary'); You can find more good examples here : How is this different? Hi @forpas: I am applying the rule similar to BODMAS for where condition. You can find similar examples here : https://codepedia.info/sql-using-parentheses-with-and-or-condition-is-necessary-with-where-clause-to-gives-expected-result Does the rule that you apply make any difference to the evaluation of the condition in the WHERE clause? Let me explain you this with sqlfiddle. I am creating one. Please give me 10 minutes I am new to sqlfiddle. Please give me little more time; I shall insert sample data similar to OP and show it
STACK_EXCHANGE
This book is written for new users, who can use it as a self-study resource to learn Visio 2013. You can change the interval type through the right-click menu. Move the text off a timeline Timelines can sometimes get crowded. Attach the control handles of the leader line for each milestone or interval to the guide. More expanded timelines can also be added on top of the existing expanded timelines. I go back in to change the date and it won't stick. With the new Visio you can quickly create clean, organized timelines that make it easy to order events and share information. The Configure Timeline dialog box opens. In addition to that, you can create drawings with great accuracy. The Save page shows the options to set the file format, auto recovery settings, and server files location. For example, the expanded timeline can show many details of events in a particular period, which might not be visible on the main timeline. For example, when you drop an interval on a cylindrical timeline, the interval adjusts to fit the curved shape. Improvements we made across the product allow the new timelines to take on a variety of different looks to create beautiful, professional diagrams. Summary This book helps you to learn the basics of Microsoft Visio 2013 using brief explanations, and step-by-step examples. Templates available in Visio 2013 Visio 2013 offers many templates to carry out different types of diagrams. This becomes very useful for large flowcharts where using long connectors can be confusing and hard to follow. The proceeding tutorials will help you to work with various types of diagrams that can be created using Visio. These synchronize automatically so that if you change them on one timeline, they also change on the other. Show details for a section of a timeline Expand a segment of an existing timeline as a second timeline to show more detail. Tutorial 12, Creating Detailed Network Diagrams, covers how to create network diagrams. 
I am using the straight line timeline and the straight line milestone marker with the description at the top of the marker. Tip: An entire row of milestones can be adjusted by simply moving the guide up or down. Visio Standard 2019 and Visio Professional 2019 are the latest perpetual versions of Visio. For instance the date above the description is 1984, but I have to move the line over to the right so it doesn't bump into the marker to the left dated 1981. If your connector has arrows, you can also change the direction of the arrows by selecting the connection and then clicking Reverse Ends. Remember that the first step is always the toughest step, and the aim of this post is to provide you with resources that will help you take your first step in Microsoft Visio. Using a combination of shapes, text, and lines, a flowchart created in Visio is easy to understand and relatively simple to create. You can also change the milestone by opening the Configure Milestone dialog box again by right-clicking the milestone, clicking Configure Milestone, and then changing the date. Visio desktop comes with a robust library of built-in and third-party templates and shapes, as well as integrated collaboration tools. Updated positioning behavior makes it quick to align events and unclutter busy regions. Flowcharts are diagrams that show the steps in a process. Separate phases during the project can be represented by Interval shapes; as these are added to the timeline in Visio a dialog box opens to allow them to be configured. What you will notice is that the format, the default format, for Visio is Arial 8 point font. Let us know in the comments below! Tutorial 1, Creating Basic Flow Charts, teaches you to create basic diagrams using Visio stencils. 
Visio Professional 2019 Visio Standard 2019 Visio 2013 Visio Professional 2016 Visio Standard 2016 Visio 2010 Visio 2007 Visio Online Plan 2 Visio Premium 2010 Visio Professional 2013 Visio Standard 2007 Visio Standard 2010 Capturing and communicating project milestones is easy with a timeline — a horizontal or vertical line that represents the life of a project, with markers that indicate project phases and milestones. Expanded timelines are useful to get a zoomed in view of the finer aspects of a time period. Drag the control handle above the interval shape to move the text to a better location. To distinguish different categories of events or highlight significant deadlines, set the milestone type by using the right-click menu. Each time you open a timeline drawing with an Elapsed time or Today marker shape on it, the shapes shift to align with the current real-world date. You can select different types of templates from the Backstage to start new drawing. The yellow ends can be used to move or change the orientation of the expanded timeline. Under Formatting, select or clear check boxes to show or hide dates or time scale markings on the timeline. Tip: To quickly change the time span of an interval, drag the green selection handles or move the interval where you want it on the timeline. The expanded timeline uses the same shape type as the original timeline by default. In many ways Gantt charts combine the features of a Timeline and a spreadsheet into one neat package that gives you an overview of the important milestones of a project over time — along with additional cells for information about duration, resources or other parameters. Apply Quick Styles to milestones and intervals to visually differentiate between categories of events.
OPCFW_CODE
Joe and Theo, This is clearly a well-thought out bit of work. I assume that the blank node solution is viable, but I wonder if you contemplated another possibility: that the series is a bibliographic entity on its own that has a relationship with monograph? This would make sense given that: - a series is a work in its own right - there can be catalog entries for the series itself - some series have authority records The advantage of this solution is that it identifies a series with a persistent identifier that would allow one to link all members of the series, e.g. to show a list of monographs in the series. The following are examples of the kind of modeling Karen is describing. Series as a typed resource with persistent identifiers that would allow one to link all of its members (e.g. to show a list of monographs in the series). For those interested, the raw data of the first Series can be extracted from the RDFa of the page and seen here: These Series are materialized from collections of MARC records that share MARC 830 properties. While there is certainly more testing to do, I continue to be impressed by how well existing MARC records map to this particular linked data pattern. The disadvantage is that it has to solve these problems: 1. what to do with series that usually do not get authority control (e.g. the publishers' series) This is not an issue of identifiers but a general one of determining when things are the same and when they’re not. (my take: treat them as entities and give them an ID) Agreed. And the ability to give these resources identifiers at the granularity you're describing provides a basis for more collaborative, value-add curation going forward. 2. at what "level" to link a series to a bf:Instance (the bf:Work for the series?) These questions always fall back to “it depends” and benefit from concrete use cases. 
The #2 question there is one that is even more complex with FRBR, but is true for all multi-entity bibliographic models, which is that bibliographic relationships need to be made between entities and it isn't always clear which entities are appropriate for the relationship. In this case, the series as a bf:Work is manifested as a group of bf:Instance's with a "partOf" relationship. There's no single publication that is the bf:Instance of the series. From a visibility use-case perspective, the current approach we’re taking is to define a Series as a Collection (of Works). As Collections are a subClass of Work, Series can have members which are other Series. The relationship “memberOf” associates Works as part of a Series (Collection). The topology (as illustrative above by clicking around and following your nose) is pretty interesting and this helps raise the visibility of these resources. Joe and Theo’s suggestion of “series” (as partOf refinement) is a further refinement of “memberOf” but that level of specificity may be less important once one treats Series as a typed resource. That said, their document is good analysis and, as this is such a complex issue, more thought, experimentation and exploration is certainly required.
OPCFW_CODE
Hosted project proposal: StarlingMonkey Proposing the adoption of StarlingMonkey as a Bytecode Alliance hosted project. Repository URL: https://github.com/bytecodealliance/StarlingMonkey StarlingMonkey StarlingMonkey is a component-native JavaScript runtime targeting WASIp2 or other sufficiently capable host environments. It's designed to be highly modular and is easily extensible with additional builtins. StarlingMonkey is built on SpiderMonkey, the JS engine used by Firefox, and is thus highly spec compliant and extremely robust. Additionally, it includes an implementation of various web standards, such as the fetch API, WHATWG Streams, and a subset of the Web Crypto API. StarlingMonkey is derived from Fastly's js-compute-runtime project, but has been significantly refactored and extended to be more modular and embeddable as part of an effort to integrate it into Fermyon's JS SDK for the Spin runtime. (Note to observers: the StarlingMonkey repository is already in the Bytecode Alliance organization, but it's not yet an official Bytecode Alliance project. This was agreed on by the TSC a few months back, with this proposal being intended to formalize the status as a hosted project.) Requirements Alignment with the Bytecode Alliance Mission I believe the StarlingMonkey project aligns strongly with the Bytecode Alliance Mission by providing a robust toolchain for using JavaScript to develop WebAssembly Components. StarlingMonkey is designed to be embeddable and extensible through hooks in its CMake-based build system: it can be integrated into downstream projects and extended with additional builtins without changes to the code base itself. Additionally, all builtins are configurable and StarlingMonkey's host API is abstracted such that it can target environments other than WASIp2, meaning that the runtime can be tailored to the specific needs of downstream integrations. 
One such integration is the existing ComponentizeJS project, through which it is easy to target arbitrary WIT interfaces using JavaScript without the need to compile custom runtimes. Code Review Description Reviews by the members of the starlingmonkey-core team are required for all PRs, with a high code quality standard for all changes. Code of Conduct The project follows the Bytecode Alliance Code of Conduct. Continuous Integration Testing StarlingMonkey has a continuous testing setup consisting of multiple kinds of tests: End-to-end tests that fully provide their own code and are checked against expected output Integration tests using a JS harness and testing builtins such as assertions A harness for running a relevant subset of the Web Platform Tests suite, ensuring a high degree of spec conformance of the implementations of builtins. Contributor Documentation The repo contains a README file that provides an overview of the project and how to build and run it. An issue is on file to provide more technical and architectural documentation and expand on contributor facing docs. Following the Bytecode Alliance Operational Principles The StarlingMonkey project is committed to being interoperable with other projects and follows a standards-first approach: it targets WASIp2 and aims for high compliance with web standards wherever applicable. Licensing Compatible with the Bytecode Alliance Currently, the project uses the BA's default license, Apache-2.0 WITH LLVM-exception, for all code in the repository. There's an intent to import Rust code under the MPL-2.0 license, which is compatible with the Apache-2.0 license and used by the upstream SpiderMonkey project. The Bytecode Alliance's board of directors has already granted an exception for this as required by the BA's IP policy. 
README A README can be found at https://github.com/bytecodealliance/StarlingMonkey/blob/main/README.md Upon acceptance of this application, a header identifying StarlingMonkey as a Bytecode Alliance project will be added. Release Process StarlingMonkey itself doesn't have an automated release process yet, but one is available through the integration into the ComponentizeJS project. An issue is on file to automate the release process for StarlingMonkey itself. Security Process StarlingMonkey is fully run in a sandboxed environment and doesn't have access to the host system, so the attack surface is significantly reduced. Nevertheless, an issue is on file for defining a formal security process for the project. Semantic Versioning The project itself doesn't yet have any formal releases. Instead, ComponentizeJS and other downstream projects update the version used by depending on a specific commit hash. An issue is on file to implement semantic versioning for the project. Secrets Management The project doesn't handle any secrets at the moment. If and when that changes, it will use GitHub secrets for repository-wide secrets. Supply Chain Security StarlingMonkey has few upstream dependencies, with the most notable ones being SpiderMonkey and OpenSSL—both projects with a very high level of scrutiny. In both cases, official release versions are used, with additional patches carried downstream where needed. Sustainable Contributor Base StarlingMonkey receives regular contributions from multiple developers working at multiple organizations, and has a core maintainer team with members of two different Bytecode Alliance member organizations. Version Control StarlingMonkey is already hosted in the Bytecode Alliance GitHub organization. Recommendations Changelog The project will follow keepachangelog.com once it's feature-complete and a 1.0 version is released. Continuous Fuzzing No fuzzing is currently performed. 
While it'll make sense to eventually change that, the fact that the runtime is fully sandboxed and doesn't have access to the host system significantly reduces the attack surface. End-User Documentation StarlingMonkey itself doesn't really contain end-user documentation, as it's intended to be embedded into other projects. However, the ComponentizeJS project provides a high-level overview of how to use the runtime. Issue Triage Process The project uses GitHub to track issues and manage pull requests. A GitHub project is used to track blockers for a 1.0 release. Leverage the Bytecode Alliance RFC Process StarlingMonkey does not leverage the RFC process yet, mostly due to being a new project, which is not feature-complete yet, but it's intended to do so as part of the stabilization process. Production Use Multiple organizations are already using StarlingMonkey in production, including Fastly and Fermyon as part of the JS SDKs for their platforms. Public Project Meetings and Notes Currently, no public meetings are taking place for StarlingMonkey, but the JS subgroup of SIG-guest-languages is used to discuss the project. Sanitizers and Code Analysis SpiderMonkey is fully running inside the WebAssembly sandbox, making the use of sanitizers less critical. However, the project is considering adding sanitizers to the build process to catch potential issues early. As a note, as the person making this proposal I'll recuse myself from all aspects of the evaluation by the TSC. We discussed this at today's TSC meeting and I'm happy to announce that we have approved this proposal! Please work with @disquisitioner and the TSC for any administrative follow ups, thanks!
GITHUB_ARCHIVE
Analysis of Optimization Technique of Same Program Written in Two Different Interfaces i.e. CUI and GUI Using Java and Calculate Their Differences |© 2021 by IJCTT Journal| |Year of Publication : 2021| |Authors : Dr. Arpana Chaturvedi, Dr. Deepti Khanna, Deepit Aggarwal, Ayush Prasad| |DOI : 10.14445/22312803/IJCTT-V69I3P109| How to Cite? Dr. Arpana Chaturvedi, Dr. Deepti Khanna, Deepit Aggarwal, Ayush Prasad, "Analysis of Optimization Technique of Same Program Written in Two Different Interfaces i.e. CUI and GUI Using Java and Calculate Their Differences," International Journal of Computer Trends and Technology, vol. 69, no. 3, pp. 46-51, 2021. Crossref, 10.14445/22312803/IJCTT-V69I3P101 Java is one of the most stable programming languages and from time to time Oracle Corporation frequently updates the language. It is also platform independent, supports common programming paradigms, has a rich set of APIs, loads of frameworks, libraries, IDEs and development tools, simplifies development of real-time software, facilitates embedded computing and is robust and secure, so a vast majority of applications use the Java programming language. Here in our research work we have developed a program named inventory control management system. It’s a CUI program which is created in core Java. A GUI application is also created of the same program with the help of JFrame using event driven programming. In our research work we have used different optimization techniques such as CPU utilization, Heap Count and Threads, etc. to study performance on CUI and GUI; based upon the result we tried to find out which interface is better in terms of memory utilization and CPU utilization. Java program, Optimization techniques, GUI, CUI, JFrame. Performance Monitoring Of JAVA Application Analyzing the Complexity of Java Programs using Object Oriented Software Metrics. Dynamic analysis of Java program concepts for visualization and profiling Apache Software Foundation, Log4J S. Browne, J. 
Dongarra, N. Garner, G. Ho, P. Mucci, A Portable Programming Interface for Performance Evaluation on Modern Processors, The International Journal of High-Performance Computing Applications 14, 3:189-204 (Fall), 2000. M. Dahm, The Byte Code Engineering Library, 2001. E. Gamma, R. Helm, R. Johnson, J. Vlissides, Design Patterns: Elements of Reusable Object-Oriented Software, Addison-Wesley, 1995. J. Gosling, B. Joy, G. Steele, The Java Language Specification, Addison-Wesley, 1996. C.A.R. Hoare, Monitors: An Operating System Structuring Concept, Comm. ACM 17, 10:549-557 (October), 1974. M. Henning, S. Vinoski, Advanced CORBA Programming with C++, ISBN 0201379279, Addison-Wesley, 1999. IBM Research, Jinsight project,2001. Intel, VTune Performance Analyzer,2001. Intel Corporation, Intel Architecture Software Developer’s Manual Volume 3: System Programming Guide, 1997. IONA Technologies, Object Oriented Concepts Inc., ORBacus 4 for Java, 2000. R, Jain, The Art of Computer Systems Performance Analysis: Techniques for Experimental Design, Measurement, Simulation, and Modeling, John Wiley & Sons, 1991
OPCFW_CODE
Business Tools Fall Update Here’s an update on the progress we’re making on the major rewrite of the Commerce Server business tools. You may recall from an earlier blog post, we started off the process with an extensive storyboarding exercise, where we focused our thoughts on the roles of people using the tools and the tasks that they might be undertaking. In the intervening months, we’ve been heads down with our UX consultants Toledo2 to refine those ideas into wireframes and then one step further into a first look at the visuals. Our goal throughout this process is to be able to deliver a Customer Technology Preview (CTP) towards the end of 2013 and an initial release shortly thereafter. The final refinements would then come from our customers and partners as we move through the process of delivering frequent, incremental iterations to the tools throughout 2014 (and beyond!). As the storyboarding moved towards wireframing, phases started to emerge from the process which enabled us to chunk the work into manageable and deliverable pieces. As this stands today, the work items are broken down as follows: - First, a CTP focused on core functionality from Catalog Manager - Next, our Initial Release aimed at replacing Catalog Manager - Then, agile releases on a very frequent basis (think weeks to months) to replace the other core scenarios from Marketing Manager and Customer and Order Manager as well as the addition of new features not in any of the business tools today By decoupling the business tools from the core server release, we can respond to customer feedback, new user scenarios and use cases, and add features on a far more rapid basis than anything previously experienced in Commerce Server history. Search & Disclosure One paradigm that is certainly changing, is our approach to finding data to work on. We want our users to be able to work quickly on any aspect of data and to facilitate that, we’re making faceted search a key entry point into the tools. 
Why drill down through complex trees, when you can just type a key word or product code and let the tool take you straight there? We think it will significantly speed up product and catalog editing, especially with the use of facets to refine the search further. And we’re also attempting to provide more context around visual search results. The interface will be able to disclose information around a selected product, which might be how many categories it appears in, or what discounts apply to it. As you can hopefully see, this isn’t just an exercise in replacing the existing tools, it’s an exercise in reimagining the entire workflow. Tasks are key to this approach and it’s why we spent such a long time researching these tasks with existing customers, to make sure we’re understanding how the modern merchandiser and marketer approaches their work. Working in conjunction with Toledo2, we now have extensive wireframes for all phases – yes, we have a lot of wireframes! Hundreds and hundreds of scenarios mapped out. That’s not to say that these won’t change as we refine the designs of course, but this important exercise has allowed us to be sure that the part we’ve built for our CTP – the core framework which encompasses the whole interface grid layout, will be capable of the level of flexibility needed to cope with (we hope) any scenario we throw at it. Fig 1: A Host of Wireframes! The Look and Feel Although our UX process dictates that at this point, we’re not fixated on the final “look and feel” of the interface, we have had a bit of fun with some design ideas. We’re certainly clear that the final UI will be very clean, but this is where we’re so lucky to be working in conjunction with Toledo2, who are really making a name for themselves around the world. For some of the philosophy behind their thinking, I highly recommend reading Arturo Toledo’s design blog, in particular their work with Microsoft and Nokia on the Beyond Tiles design tour. 
Here are a couple of “work in progress” screens: Fig 2: The results of searching for "webcams" - all the places they appear... Fig 3: A typical Product Editing Screen arrived at through faceted search For those interested in the nuts and bolts, our responsive design grid is based on the popular Twitter Bootstrap framework and many of our controls will be based on the JQuery UI toolkit. It was important to us not to reinvent the wheel, we are instead focused on getting the tasks and flow correct and, of course, making sure we have the necessary API framework in place to support our concepts. As with past releases, we will provide developer extensibility for advanced customization and extensions. Upgrades will simply be a matter of downloading and deploying a single file – which will take only minutes. And, because it’s browser based, all business users will be updated at once. As we said previously, the new tools will debut alongside the next version of the product and we are on track to have that CTP in customer and partners’ hands by the end of the year – with the Initial Release shortly thereafter and frequent ongoing updates after that. But as a reminder, this is being delivered as a separate project from the main server release, in much the same way as our starter sites, so as we move through the project phases, you will see constant ongoing updates. And once we’ve started to deliver, there will be a clear feedback channel for you to comment and help us improve them on an ongoing basis – and see that feedback reflected in weeks to months. We’re incredibly excited about what the team is producing, we know you will be too!
OPCFW_CODE
SQL Server 2012 Date Change I am attempting to convert the DATETIME Column titled CREAT_DTTM to a simple "1/1/2014" format. I have looked at CAST, CONVERT and FORMAT functions but i just can't get it to work. Any guidance would be greatly appreciated! I am running SQL Server 2012 Some sample data CREAT_DTTM ------------------------ 2014-01-01 00:33:58.000 2014-01-01 00:33:58.000 2014-01-01 07:40:01.000 2014-01-01 09:50:27.000 2014-01-01 10:40:04.000 2014-01-01 10:40:04.000 By convert I mean: This data is being pulled from another table by a stored proc our developer created. It is sales data that shows when an order has been entered into the system. I created a powerpivot data slicer in Excel that is linked to this table but they do not like the format the date is displayed in. So I was attempting to convert it from the aforementioned format to one more acceptable by the stakeholders. Only thing is that I do not have ample experience in writing queries What do you mean by "convert"? What are you doing with the dates that requires the conversion? Welcome on SO. Could you show us what you have tried so far? By convert i mean: This data is being pulled from another table by a stored proc our developer created. It is sales data that shows when an order has been entered into the system. I created a powerpivot data slicer in excel that is linked to this table but they do not like the format the date is displayed in. So i was attempting to convert it from the aforementioned format to one more acceptable by the stakeholders. Only thing is that i do not have ample experience in writing queries..pretty much a newbie @user3896424 The reason I asked is that converting it to display in another format still keeps the underlying value the same, do you just want to display it in a different format to the user? If so, do that in the presentation layer not in the database. This should be done in Excel. Format your cells appropriately. I have not updated our stored proc yet.. 
i just played with a query on the data.. but the suggestion to just format it in excel made me realize that i can reformat it in the powerpivot table window.. so the data slicer will pull the format i need. Thanks for the help.. i definitely learned a few things from you guys! Try the following. select convert(varchar(10), creat_dttm, 101) from yourTable Wow...not sure why somebody downvoted this. This is exactly how I would do it. Probably because this will convert a date to a date. Fixed. Oversight on my part. Thank you to everyone for the help.. greatly appreciated.. and the above worked to get it into an acceptable format.. once again... i thank each and every one of you for the help and consideration!! Just remember that doing it this way means the client (i.e. Excel) doesn't know it's a date any more and cannot perform things like sorting, and it puts an unnecessary load on the database if there are lots of dates to convert. You can change the format of the dates in a PowerPivot table through the PowerPivot Window. The advantage here is that you do not need to do any modification of your stored procedure and your datatype is still a date when it comes into your pivot table. Open your PowerPivot Window again. Select your data column, then select Format from the Formatting section on the Home tab. exactly what i did.. that worked and i learned the accurate way to CAST .. thanks! Try this: select convert(varchar(15), creat_dttm, 101) from tblTableName I think that CAST will do the job nicely for you, e.g. select cast(creat_dttm as date) from theTable That will have dashes, the OP clearly states the format they want is with slashes. @SeanLange: this converts the DATETIME to a DATE - neither of the two have any format associated with them in SQL Server - they're 8 and 3 bytes of binary data respectively. The dashes or slashes only occur when you're doing a string representation of those dates (or datetimes) I understand the code and difference between datatypes. 
The OP wants a string representation and they clearly stated slashes.
STACK_EXCHANGE
Autonomous vehicles are the future of transportation. But before these self-driving cars can hit the roads, there’s a lot of work that needs to be done behind the scenes. One of the most important facets of autonomous vehicle development is mapping and localization. In this blog post, we will explore what simultaneous mapping and localization is and how it’s being used to develop autonomous vehicles. We will also touch on some of the challenges associated with this technology. What is Simultaneous Mapping and Localization? Simultaneous mapping and localization (SLAM) is a robotic mapping technique that uses an autonomous robot to construct or update a map of its environment while keeping track of its own location. It is one of the key technologies used in self-driving cars and mobile robots. SLAM algorithms take sensory data from the robot’s sensors as input and use it to create a map of the environment. The algorithm also uses this data to estimate the robot’s position within the environment. This estimation is called localization. Localization is essential for creating an accurate map, as it allows the algorithm to know where the data from each sensor reading should be placed within the map. There are two main types of SLAM: outdoor SLAM and indoor SLAM. Outdoor SLAM algorithms are typically used for self-driving cars, as they can make use of GPS data to help estimate the car’s position. Indoor SLAM algorithms are used for mobile robots that operate in GPS-denied environments, such as indoors or underwater. Outdoor SLAM algorithms often make use of wheel odometry data in addition to GPS data. Wheel odometry is a measure of how far the wheels have rotated since the last measurement was taken. This information can be used to estimate the distance travelled by the robot, even if GPS signals are unavailable. Indoor SLAM algorithms usually make use of visual data from cameras or laser rangefinders. 
These sensors can be used to create a 2D map of the environment. How Does Simultaneous Mapping and Localization Work? When it comes to robots and other automated systems, one of the key components is Simultaneous Mapping and Localization (SLAM). In order to understand how SLAM works, it’s first important to understand what each term means. Mapping refers to the process of creating a map of an unknown environment. This is typically done by having the robot move around the environment and taking measurements at different points. These measurements are then used to create a representation of the environment, which can be in the form of a 2D or 3D map. Localization refers to the process of determining the robot’s position within the environment. This can be done in a number of ways, but is typically done by comparing the measurements taken by the robot to the map of the environment. By doing this, the robot is able to determine its position and orientation within the environment. So how does SLAM work? The key is that SLAM combines both mapping and localization into one process. This means that as the robot moves around an unknown environment, it is simultaneously creating a map of that environment and determining its own position within that map. There are a number of benefits to this approach. First, it means that the robot doesn’t need any prior knowledge of its surroundings in order to create a map. Second, it’s much more efficient than traditional methods because it doesn’t require multiple passes through the environment. Simultaneous mapping and localization is a process of creating a map of an area while also determining the location of the mapping device within that area. This process is often used in robotics and autonomous vehicle applications, as it allows for more accurate navigation. While simultaneous mapping and localization can be computationally intensive, recent advances in technology have made it more feasible for real-time applications.
OPCFW_CODE
On collapse theories like GRW, Quantum Mechanics proceeds deterministically according to the Schroedinger Equation until a random "hitting" event occurs, when collapse occurs. There is a frequency parameter f that controls how often hitting events tend to happen. They tend to happen much more frequently when there are large amounts of matter involved than when there are small. Nonetheless, the hitting events are random. Thus the physics of collapse theories implies that it is physically possible, with a non-zero (but presumably tiny) probability, that no hitting event happen in the universe over the next year, and hence no collapse happens over a year. Since this is physically possible, it should make sense to ask: What would it be like if this happened? Indeed, if we live in an infinite multiverse governed by a collapse theory with the same frequency everywhere, we can be confident that such no-collapse years do occur. So what would it be like if no collapse occurred? I can think of three plausible proposals: - Nothing: A (nomic or metaphysical) precondition of consciousness is a brain in a pure, or at least close to pure, quantum state, so in a no-collapse year, once everybody's brain states came to have a sufficient superposition of states (from the preferred basis), nobody would be conscious. However, after that year passed, there would be a collapse, and there would be false but plausible memories corresponding to the outcome of the collapse. - Everett: For that no-collapse year, we would be living as if in a branching Everett-style multiverse. Either we would be experiencing different things in different branches, or we would have counterparts in different branches experiencing different things. Then with the collapse at the end of the year, all the branches but one would disappear. - Weird: We would be having strange superposed experiences, perhaps quite unlike anything we can imagine. We would have superposed neural memory states. 
Then at the end of the year, when collapse occurred, our memories would also collapse, and we would end up with an ordinary set of memories corresponding to one component of the superpositions. Here's one curious feature of all three proposals: At the end of the year, we would be back to business as usual, seemingly with normal memories of the past year. We would have no way of telling after the fact that we had a year with no collapse. On the Nothing proposal, we would have no way of telling during the no-collapse year, either, since we wouldn't be conscious during it. On the Everett proposal, some of our counterparts or branched selves would be having strange, improbable experiences. On the Weird proposal, we would be having strange experiences, but then we would have no memory of them. If we take the Everett proposal, then the GRW theorist does not avoid the metaphysical oddness of persons in a branching multiverse—her only special contribution is to say that this oddness is unlikely to occur. If we take the Weird proposal, then the collapse theorist still has to deal with the metaphysical and psychological oddness of Schroedinger's cat phenomena—again, her only contribution is to say that such phenomena are unlikely. If these difficulties are really serious metaphysical problems, then the GRW theorist does not avoid them. A die that turns into a square circle once it rolls a million heads is not any less metaphysically problematic than a plain and simple square circle. I suspect the Nothing approach is the best one for the GRW theorist. For instance, Nothing combined with compact-support-collapse helps with the infamous tails problem for collapse theories (see this for a nice discussion of the tails problem; alas, my suggestion doesn't help with the relativistic problems the author points out). For maybe we are conscious only at those instants when the wavefunctions have compact support. 
This is good reason to opt for the Nothing proposal if one has a collapse theory. But the Nothing approach leads to a strange sceptical hypothesis, namely that I have not been conscious over the past week, notwithstanding apparent memories from yesterday. For remember that the collapse theories have a free parameter, f, which governs the frequency of collapses. If that parameter is low enough, then collapses will be rare, say once a month in my vicinity. And what reason do I have on the Nothing proposal to suppose that f isn't that low? The apparent memories of continuous past consciousness are exactly what I would expect with a low parameter, since the apparent memories are induced by the collapse of superposed neural states. We do have some constraints on f. For instance, f had better not be so low that it's surprising why anybody is ever conscious. Maybe there are some stronger constraints than that, though this is not clear to me. But there is no reason given the Nothing proposal to deny a value of f that yields once-per-month collapses. The Everett proposal may well lead to a sceptical worry about low values of f as well. For how do we know that we're not right now in a no-collapse period? The Weird proposal does not lead to this sceptical worry that f might be low. For on the Weird proposal, given a low f it's surprising that my current conscious state is non-weird, and so that's evidence against Weird plus a very low value of f. But Weird is weird. The above sceptical worries about low values of f are ameliorated if in addition to being collapse theorists we are theists. For God likely wouldn't want us to have too many misleading memories, and hence would likely make f high enough to prevent misleading memories.
OPCFW_CODE
Over the few years in which neural models have been the state of the art in machine translation, the architectures have become quite standardized. There is a vocabulary of several thousand discrete input/output units. As the first step, the inputs are represented by static embeddings which get encoded into a contextualized vector representation. It is used as a sort of working memory by the decoder, which typically has a similar architecture to the encoder and generates the output left-to-right. In most cases, the input and output vocabularies are the same, so the same embedding matrix can be used both in the encoder, in the decoder, and also as an output projection giving a probability distribution over the output words. Indeed, there are different underlying architectures (recurrent networks, convolutional networks, Transformers), and people try to come up with conceptual alternatives such as non-autoregressive models or insertion-based models. However, there is not much discussion about when the initial embedding layer is really necessary and when not. This is the question that a recent pre-print Neural Machine Translation without Embeddings from the Tel Aviv University attempts to answer. The paper presents a series of relatively simple experiments. They trained translation systems with the standard architecture with subword input, character inputs, and byte inputs. In an ASCII world, characters would be the same as bytes, but with Unicode, characters might consist of multiple bytes (my impression is that the more non-western the alphabet is, the more bytes are required for a character). As a contrastive experiment, they trained a byte-based model without embeddings, i.e., the input represented as one-hot vectors. It is nicely shown in Figure 1 of the paper: Let us discuss what omitting embeddings really means. The one-hot vector that is on the input gets multiplied by some weight matrices—which means it gets embedded anyway. 
In the Transformer architecture, the encoder starts with a self-attentive sub-layer, so we can view it as having three different embeddings for each byte: one for attention queries, one for attention keys, and one for attention values (that get further split for individual heads). With a subword vocabulary with tens of thousands of units, this would mean a considerable increase in parameter count — after all, the embedding matrix is one of the biggest parameters of the models. Since there are only 256 possible byte values, not having the embedding layer hardly affects the total number of parameters. The quantitative results show that when translating from English, the non-embedding byte-level model performs on par with sub-word models. The paper shows an intuitively clear fact, but it is nice to have this intuition confirmed experimentally. The embeddings are not a magical component the NMT model cannot work without. The role of embeddings is mainly reducing the number of parameters (and thus also being able to learn something about less frequent tokens). At the top of the Transformer encoder, we need to represent the input for queries, keys, and values in the self-attention heads. With the embedding layer, we assume, this can be decomposed into two matrices: one “general” word representation and one task-specific projection in the first layer. (With the RNN models we can think about different representations for different gates of the cell.) Under such an interpretation, I am a little bit confused about the conclusions of the paper. There is a discussion about some sort of meaning orthogonality of the bytes which allows them to get rid of the embeddings. I am not sure if this is the right view: they still multiply the one-hot representation by weight matrices, so technically, they do embed the inputs, but differently for different self-attention components. 
Also, the results they present do not agree with what I got when I experimented with character-level models on WMT data and this year’s ACL papers on char-level models that show that there is still a gap between subword models and character models. This gap seems not to exist in the baseline experiments in this paper. I am quite curious what is the secret ingredient that causes that. However, it might be a difference between WMT and IWSLT datasets.
OPCFW_CODE
- VM recovery overview - Planning for the procedure - Steps for recovering VMs After the initial deployment of the VMs, some VMs might malfunction due to various reasons. For example, a service fault or a system failure might cause a VM to malfunction. Depending on different situations, Rhino VM automation allows you to recover malfunctioning VM nodes without affecting other nodes in the same VM group. The following table summarizes typical VM issues and the recovery operation you can use to resolve each issue. |Recovery operation to resolve the issues Transient VM issues. Reboot the affected VMs, in sequence, checking for VM convergence before moving on to the next node. A VM malfunctions, but the During the healing process, the system performs decommission operations, such as notifying the MDM server of the VM status, before replacing the VM. A VM cannot be recovered with the During the replacement process, the system doesn’t perform any decommission operations. Instead, it deletes the VM directly and then replaces it with a new one. All VMs in a group don’t work. Redeploy the VM group, by using the Backout procedure for the current platform. All VMs that have been deployed don’t work. Perform a full redeployment of the VMs, by using the Backout procedure for each group of VMs, then deploying again. Recovery operations in the table are ordered from quickest and least impactful to slowest and most invasive. To minimize system impact, always use a quicker and less impactful operation to recover a VM. csar heal and csar recovery operations are the main focus of this section. VM outages are unpredictable, and VM recovery requires a human engineer(s) in the loop to: notice a fault diagnose which VM(s) needs recovering choose which operation to use execute the right procedure. These pages focus on how to diagnose which VM(s) needs recovery and how to perform that recovery. 
Initial fault detection and alerting is a separate concern; nothing in this documentation about recovery replaces the need for service monitoring. The rvtconfig report-group-status command can help you decide which VM to recover and which operation to use. Both the heal and redeploy recovery operations replace the VM, rather than recovering it "in place". As such, any state on the VM that needs to be retained (such as logs) must be collected before recovery. Don’t apply configuration changes until the recovery operations are completed. Don’t upgrade VMs until the recovery operations are completed. This includes recovering to another version, which is not supported, with the exception of the "upgrade before upload-config" case below. A VM can only be recovered back to the version it was already running. A recovery operation cannot be used to skip over upgrade steps, for example. Before upgrading or rolling back a VM, allow any recovery operations (heal or redeploy) to complete successfully. |The reverse does not apply: VMs that malfunction part way through an upgrade or rollback can indeed be recovered using heal or redeploy. There is one case in which it is permissible to heal a VM to a different version, when the following mistaken steps have occurred: The VMs were already deployed on an earlier downlevel version, and An upgrade attempt was made through csar update before uploading the uplevel configuration, and The csar update command timed out due to lack of configuration, and A rollback is wanted. In this case, you can use the csar heal command to roll back the partially updated VM back to the downlevel version. This procedure assumes that: you have access to the SIMPL VM that was used to deploy the VM(s) you have detected a fault on one or more VM(s) in the group, which need replacing. Do these procedures in a maintenance period where possible, but you can do them outside of a maintenance period if the affected VMs are causing immediate or imminent loss of service. 
VM recovery time varies by node type. As a general guide, it should take approximately 15 minutes. You must have access to the SIMPL VM, and the SIMPL VM must have the right permissions for your VM platform. This page references an external document: the SIMPL VM Documentation. Ensure you have a copy available before proceeding.
OPCFW_CODE
Should inter-service API be separated from public API? I have a problem with proper API design. So there's an app where users can track their sport Activities. User can later access and view them. It can be done by making GET /api/activities/{id} request, where id is an id of the activity. Internally I am searching db by user id and activity id, so that another user cannot view another user's Activity. But then I need to access and process that Activity in another app service. So I need to have access but with that endpoint I won't get it because of not matching user id. So I can see these solutions: Check permissions of the requester in the controller and if it's service then query db by just activity id (I don't like it because of possible code complication – need to pass roles to services or provide separate functions for each one and call the right one from controller). Create separate endpoint(s) for inter-service communication (of course properly secured). Fetch activity by id always and decide later whether to return it or not depending on requester (simpler than 1. but there's unnecessary deserialization of objects from db). What is the best approach and best practices? If these were really identical APIs with different auth, there might be a clean way to separate this concern out within the implementation. But I suspect the admin API will have different or additional features from the public API, so I would strongly recommend creating two separate APIs. When I've done this with a single API, it ends up being complicated and riddled with if/then statements (or similar). It becomes hard to reason about, because the reader of the code isn't sure who's calling each endpoint. I've had new users come to the code and not realize it was also the admin interface, or vice versa. Also consider: The public API needs to be documented clearly and publicly, and the private API needs a different, private, and perhaps more technical documentation. 
The auth is different, as you have pointed out. This may evolve and become more complex. What if you want to keep the private API behind a firewall or in a VPN? They will scale and throttle differently. This may be okay at first, but I've had to put different throttling policies dependending on the user-- and it gets complicated fast. Since these have different users, they naturally want to evolve at different rates. The public API wants to be stable, or have clear major version changes; but a private API can evolve faster and more organically. It will be clearer to everyone involved if these are just separate APIs and named as such. If there is shared functionality-- and it sounds like there is-- you can share code internally, creating a shared service for shared functionality. It Depends. I've worked on projects that go each of these ways. Having independent end-points (even if they are duplicates) keeps each communication channel independent and stream lined. The con is that you must maintain multiple interfaces that potentially share code paths. Having a single set of end-points keeps the interface simple. But it complicates Authorisation checking, it cannot be simply folded into Authentication. If you follow this path never have the API behave differently based on the user. It is painful to test, and brittle. Testors assign themselves as a "service" to ping the api via a http tool, they forget then use the website and it errors. Errors are logged as defects, defects are investigated, sometimes for days only to discover it was human error. The last one is handy when the repository interface is simple (ie CRUD). Sometimes it really is faster/simpler to load the resource then determine if you should return it. On a modern database (sql/nosql) its often faster to have the query perform a check, and only return if it passes. My personal opinion is to go with option 1 or 2 depending on how much overlap there is. 
Option 3 is more of an implementation detail, useful but its clouding the waters here.
STACK_EXCHANGE
You know how the hustle goes. You share your article on Hacker News but it just withers and dies. You post it to Reddit but all you get is someone telling you off. You try promoting it on a Facebook group and you get banned. Can you really turn this: Four days and no upvotes? Why even bother? Ouch, wouldn't want that. That's more like it! First spot. Perfect. Let’s have a look at a few tips. General Rules of Thumb Don’t try to establish a presence on every forum around. Pick a few communities that you’ll enjoy hanging around on. When someone looks at your profile history make sure they see a person, not a marketing bot. That’s what Patrick McKenzie is doing. Think “word of mouth”, as opposed to SEO. - Be helpful. You can’t just post a link to your article and expect people to follow it. Instead help solve painful problems, subtly suggesting there is more where that came from. That’s what Harry Dry is doing. Think effective frequency. - Forget about what you want. This is How to Win Friends and Influence People applied to the internet world. Focus on helping others, and they’ll happily come to you for more. - Make it easy to find you. If you’ve helped enough people they will check out your profile. Make sure to leave a link to your site there. In the case of Facebook or Reddit leave some clues in your posting history. - Reuse your content. Reply using the posts you wrote, but change them enough to appear as if you’re answering the person directly. The comment from the screenshots above that got me 25 Facebook likes is an edited version of my blog post. - Scout for problems in advance. Sometimes a question gets asked over and over again. Go through the archives and look for a question like that. Then write an article about it. If in the future the same question comes up you’ll have an answer ready. For example, in SaaS circles, there seems to be a thread about problems with Paddle every week. So I wrote an article with a list of Paddle.com problems. 
- Keep your history clean. Si Quan Ong, expert Reddit marketer employed at Ahrefs, in his seminal article writes: “I delete any “promotional” submissions that do not get > 10 upvotes in 48 hours”. If done well this is what you can expect A programmer who quits his job to work on his own app is concerned with server costs. This is a recurring pain that I noticed while hanging out on the Indie Hackers forum. So when I learned how to get $5000 Amazon AWS credits for $49 I wrote an article about it. I configured Syften to notify me when someone complained about their infrastructure costs and waited. Sure enough, one day a “How much does it cost to run your side project?" thread popped up. I replied as soon as I got the notification and watched: The art of helping others Note how I provide the solution in the comment itself. The focus is on helping, not promoting. I’m not tricking anyone into visiting my blog. My comment became the most upvoted one in the thread, and the thread reached the “top threads of the month” list. I got 17 upvotes, but did anyone click the link? Traffic from just that one post I promoted my blog only once that week, so all of this traffic came from that one comment. I also got five mailing list signups. Okay, let’s roll Each community is governed by different rules. See these guides for details. - Facebook. It’s easy to get banned. It’s also easy to be popular. - Hacker News. Difficulty level: nightmare. - Product Hunt. Launch every month. - Quora. Which questions to answer and how to avoid the spam detector. - Reddit. Share your blog post on Reddit with confidence.
OPCFW_CODE
Add type conversion when given a schema People do weird stuff in JSON. A lot of systems kinda figure stuff out. Redshift will convert strings to INTs for you etc. PyArrow purposely avoids doing unexpected stuff, and would rather fail. We should add support for forcing proper types on JSON data before ingesting it with PyArrow, if we are given a schema. This can actually get pretty sticky to do. I can share some code I wrote for our internal usage. There are a lot of tricky edge cases to work around (and some opinionated things we did). Here is a snippet of what we use to do this conversion. If we want to add type coercion, I think we should make it optional for the user, as I am not sure what they do on their systems. This code is run over the JSON, row by row, before columnarization. schema is a Pyarrow schema object. from decimal import Decimal import pyarrow as pa def _set_types(record, schema, bad_field_names=[]): # I know we don't normally want a list as a kwarg because its mutable # but we explicitly rely on that property here for field, val in record.items(): if schema.field_by_name(field) is None: if field not in bad_field_names: logger.debug("Skipping field '{}' which is not present in schema {}.".format( field, schema.names)) bad_field_names.append(field) continue elif schema.field_by_name(field).type.id in (pa.int64().id, pa.int32().id): # Do some type conversion with bad value cleanup, this may get nasty # Redshift does a lot of this automatically, Spark can do it begrudgingly if val == '': record[field] = None elif val is not None: record[field] = int(val) elif schema.field_by_name(field).type.id == pa.string().id: if val is not None: record[field] = unicode(val) elif schema.field_by_name(field).type.id in (pa.float64().id, pa.float32().id): if val is not None: record[field] = float(val) elif schema.field_by_name(field).type.id == pa.bool_().id: if val is not None: record[field] = bool(val) elif schema.field_by_name(field).type.id == pa.decimal128(1, 
1).id: if val is not None: precision = schema.field_by_name(field).type.precision scale = schema.field_by_name(field).type.scale record[field] = _convert_decimal(val, precision, scale) def _convert_decimal(val, precision, scale): """ Messy function to clamp a float value to a known Decimal precision and scale """ _dec = Decimal(val) # Makes a string like '{:10.6f}' format_string = '{{:{precision}.{scale}f}}'.format(precision=precision, scale=scale) return Decimal(format_string.format(_dec)) Since this requires a whole second pass over the data, it is rather inefficient. It could probably be cleaned up into a function which takes a value and a type, and returns it, so we can use it during a single pass for columnarization.
GITHUB_ARCHIVE
September 28, 2021 Sorry for the radio silence. I’ve been working at ACES HIGH and also preparing a different project (to be announced shortly)! ACES HIGH is basically done pre-editing in its current incarnation. But “current incarnation” is the key. I’ve been thinking about the core mechanics and I’m going to make a few major changes to how certain things work - something that’ll take things further away from core LUMEN and lean a little more some mecha concepts I wanted to emphasize. September 14, 2021 Tyrants were the biggest reason I decided to fast-track this expansion over creating premade missions. If you’d been paying attention to v0.1.0, you might have noticed that Prime enemies got a downgrade between versions. This is because originally Prime enemies were supposed to fill the role that Tyrants currently fill. But as I playtested, I decided they needed to be their own thing. A Tyrant is a serious enemy. September 9, 2021 Hi there! In this post I’m going to be talking about the Campaigns section of the upcoming Aces High. Specifically, the mechanical bits: Crisis Advances and Moonshot Advances. Crisis Advances # The idea behind Crisis Missions is that they’re intended to add points of challenge and tension to an otherwise power fantasy-oriented game. But when the consequences are simply narrative, this can create a disconnect when the Aces' actions are otherwise unaffected by these encounters that are supposed to signify increasing danger to the place they call home. September 4, 2021 Hi there! I’m going to both announce a supplement to APOCALYPSE FRAME core, tentatively titled Aces High, and also talk a little bit about one part of it. The supplement is going to be bundled in free with the core book because it contains stuff I feel is necessary to an experience that feels complete. (Also, it gives you an idea of what I’ll include in future non-bundled supplements.) You can see my current overview spread for it below: August 31, 2021 Hi there! 
As promised, I’m releasing an update to APOCALYPSE FRAME after some re-reading, testing, and thinking about the system. In addition to general cleanup, the changes are as follows: Attributes # Attributes for every character are now 3 in one Attribute, 2 in another, and 1 in a third. Between missions, you can swap your 3 for your 2 and your 2 for your 1. See the devlog here for my thoughts on the matter. August 26, 2021 Hi there! I’m going to talk about some changes to enemies, as well as two minor changes to Frame abilities. Enemies # The biggest change to enemies is going to be the number of activations on the GM Turn. Prior to this, the GM would take a number of Moves on the GM Turn equal to the number of Aces. With the amount of tools Aces have at their disposal, however, this doesn’t end up being enough to make much of a dent relative to the number of Drops with a modicum of smart play. August 22, 2021 Hi there! As promised, I’m going to talk about some other things that will be rebalanced for v0.2: Frame Damage and (a handful of) Tags. Some of these were revelations I had while testing, but others were a function of considering how things came together more closely. Frame Damage # Frame Damage is the consequence of losing all of your Vigor. It’s intended to show wear and tear on a Frame that’s taken significant punishment and raise stakes in combat. August 15, 2021 Hi there! I’m going to be releasing dev logs for my process periodically. Especially as the game progresses and gets more content, it’ll probably make some decisions (in the past and future) make more sense. Hopefully it’s of some use to someone! Attributes in v0.1 # In v0.1, the LUMEN Force/Flow/Focus attributes, Drive/Speed/Control, must simply total to 6 - minimum 1, maximum 4. This means you’d get the various permutations of three arrays: 4/1/1, 3/2/1, and 2/2/2. August 13, 2021 This is the first development log post for APOCALYPSE FRAME! 
Thanks for taking the time to read it, and thanks so much if you’ve purchased/downloaded the game. Please let me know what you think! v0.1 of the game was released! # A fully playable version of the game is released! It’s got enough content to last a group a good while, advice that will help a GM run both long-form and short-form games, and ideas to help spur creativity.
OPCFW_CODE
Test/Train data set for Graph Network I have a graphical network that I am creating as follows: g=nx.read_edgelist(data, create_using=nx.Graph()) I am trying to create a test and train set for the data. I tried using the below command: train, test = train_test_split(g, test_size=0.2) but this did not work. Can you please advise how I am suppose to create a test and train set when I have a graphical network. Depending on your task, you can have a try with Stellargraph's EdgeSplitter class(docs) and scikit-learn’s train_test_split function (docs) to do this. Node classification If your task is a node classification task, this Node classification with Graph Convolutional Network (GCN) is a good example of how to load data and do train-test-split. It took Cora dataset as an example. The most important steps are the following: dataset = sg.datasets.Cora() display(HTML(dataset.description)) G, node_subjects = dataset.load() train_subjects, test_subjects = model_selection.train_test_split( node_subjects, train_size=140, test_size=None, stratify=node_subjects ) val_subjects, test_subjects = model_selection.train_test_split( test_subjects, train_size=500, test_size=None, stratify=test_subjects ) train_gen = generator.flow(train_subjects.index, train_targets) val_gen = generator.flow(val_subjects.index, val_targets) test_gen = generator.flow(test_subjects.index, test_targets) Basically, it's the same as train-test-split with a normal classification task, except what we split here is nodes. Edge classification If your task is edge classification, you could have a look at this Link prediction example: GCN on the Cora citation dataset. 
The most relevant code for train-test-split is # Define an edge splitter on the original graph G: edge_splitter_test = EdgeSplitter(G) # Randomly sample a fraction p=0.1 of all positive links, and same number of negative links, from G, and obtain the # reduced graph G_test with the sampled links removed: G_test, edge_ids_test, edge_labels_test = edge_splitter_test.train_test_split( p=0.1, method="global", keep_connected=True ) # Define an edge splitter on the reduced graph G_test: edge_splitter_train = EdgeSplitter(G_test) # Randomly sample a fraction p=0.1 of all positive links, and same number of negative links, from G_test, and obtain the # reduced graph G_train with the sampled links removed: G_train, edge_ids_train, edge_labels_train = edge_splitter_train.train_test_split( p=0.1, method="global", keep_connected=True ) # For training we create a generator on the G_train graph, and make an # iterator over the training links using the generator’s flow() method: train_gen = FullBatchLinkGenerator(G_train, method="gcn") train_flow = train_gen.flow(edge_ids_train, edge_labels_train) test_gen = FullBatchLinkGenerator(G_test, method="gcn") test_flow = train_gen.flow(edge_ids_test, edge_labels_test) Here the splitting algorithm behind EdgeSplitter class(docs) is more complex, it needs to maintain the graph structure while doing the split, such as keeping the graph connectivity for example. For more details, cf source code for EdgeSplitter
STACK_EXCHANGE
So, if It's not pure objected oriented, why do we use it? Since it addresses several of the important concepts that are not lined by pure objected oriented programming languages like JAVA. Several learners employ our programming language assignment help mainly because they know we've been legitimate and often provide even during the oddest time. Now we have a gaggle of coding industry experts and tutors who are actually in the sector of programming for a few years. In case you are nonetheless in doubt You'll be able to experience our testimonial area and discover the beneficial opinions and responses that our purchasers have been sending. It Also didn’t damage that Perl is often a friendly language. It plays effectively together with your individual programming layout. The Perl motto is “There’s more than one technique to do it,”which offers by itself nicely to minimal and substantial difficulties alike. A C++ application is an assortment of commands that orders the pc to execute a particular process. This list of commands is Generally called as C Source code, code or resource code. C is among the most common programming language and if end users never have an understanding of this language, then almost certainly buyers are going to be far clear of comprehending other programming languages. You could lack in-depth expertise in programming and discover the endeavor assigned to you personally incredibly intricate – Understanding C++ cannot be performed in on a daily basis. Availing the help of our C++ coding help specialists can furnish your programming capabilities and cause you to proficient in see page every one of the ideas of C++. Now we have all my latest blog post the most up-to-date technology and means that your professor needs you to utilize as part of your undertaking. Duplicate pasting online and publishing Incorrect methods will definitely earn you minimal marks. 
Your physics homework can be a serious problem, and also the owing day is often really shut — Be at liberty to use our support and get the specified end result. Physics But group of MyassignmentHelpAu helped me with each of the assignment related challenges. They served finest assignment alternatives for my psychology homework. All my queries are actually competently solved by them. Because it is really an extension of C, each system inside the C language is usually easily consolidated/ compiled Along with the C language. The student can easily experience it, comprehend and manage to solve precisely the same assignment on his own up coming time. Our articles can be useful for upcoming reference and when planning for Examination Java programming: This is one of the most popular programming languages in the world, and it really is taught in almost every university in the world. Java programming language has all of the required functionalities, and for that reason explanation, there has been an enormous depreciation in programming follow. Geeksprogramming.com is programming website and finest programming assignment help provider service provider. We provide programming help in Just about all programming languages. GeeksProgramming was started out Together with the intention of Geeking out with or helping out anybody and everybody who will benefit from our coding capabilities. We provide high-quality C programming assignment help so that you could basically question any inquiries you will have identified Using the arrangements we have given. Perl’s roots in text processing haven't been overlooked over time. People libraries, gathered inside the CPAN, give Completely ready-produced solutions to a exceptional choice of concerns.
OPCFW_CODE
With the advent of the World Wide Web, HTML, and browsers such as Netscape Navigator, application developers unknowingly reverted back to a thin-client model that look awfully similar the days of the IBM green screen. Yes, we now had a graphical user interface but the idea was the same. The browser had become the thin client, the work that in a client-server architecture occurred on the desktop had now been shifted to the middle tier, and the data base and enterprise applications (e.g. SAP) were still managing the heavy lifting on the back end. It is important to note that the middle tier, which came to be know as “application servers”, moved back in to the glass house. Application servers proved to have a great deal of value to the enterprise. They provided an environment where one could run their business logic and acted as an enterprise integration engine. Changing anything on the mainframe was still as difficult as ever, but developing web-centric applications and deploying those in an application server environment that occasionally would interface with back end data and applications was getting increasingly easy. An added benefit was that application servers could also act as integration engines across enterprise applications, an area that had been the exclusive domain of enterprise integration platforms such as TIBCO and IBM’s MQ Series. Another key enabler of the growth of the three-tier architecture was the introduction of the Java programming language. The introduction of internet technologies into the enterprise allowed UNIX systems into the enterprise. This meant that developers had to design and compile their applications separately for each runtime environment where they might reside. A windows application would not run on UNIX. Applications developed for Sun Solaris would not run on HP/UX. The Java promise of “Write Once, Run Anywhere” tackled this problem head on. 
Key to the success of the Java programming language was adoption of the concept of the Java Virtual Machine (JVM). This meant that each platform provider (e.g. IBM, H-P, etc.) had to develop a Runtime “container” that adhered to the Java standard and abstracted the platform specific interfaces. Any manufacturer that wanted to play (survive) in the world of web-centric three-tier architectures had to develop a JVM for their platform. Java developers no longer had to worry about developing for a specific platform, which removed another significant barrier to adoption. The Java centric application server also brought enterprise class management tools to the middle tier. Application servers could be replicated and scaled with relative ease. Enterprise class failover and fault tolerance was absolutely required before an application server would be allowed to run mission critical applications in the enterprise. The tools were now in place for the three tier architecture to be broadly adopted in the enterprise. In a previous post on Client-Server computing I identified two key components that were required before the Enterprise would adopt the client-server architecture. The three tier architecture added a third component. The key components now included: - Hardware Infrastructure & Networks - Software Development Tools & Platforms - Manageable Runtime Environments (e.g. JVM) This is my version of the three legged stool of the enterprise “Internet of Things” (IoT). If any one of these is missing IoT centric vendors will have a very difficult time breaking into the enterprise.
OPCFW_CODE
Close Sprints in JIRA are simply a set of activities or concerns for a software program that the workforce should focus on during a specified period and add the sprint to JIRA itself for tracking and managing sprint. The sprint's objectives or issues are drawn from the team and project list, with every sprint often lasting one, two, or four weeks, depending on the magnitude of the needs and the sprint timeline agreed upon primarily by the workforce and the project leader. Once the sprint is ended, the created versions will be accessible to begin the next sprints. JIRA learning has been designed for professionals interested in understanding how to utilize the JIRA application. It will allow the audience a strong understanding of the many features of the JIRA close sprint and relevant examples of each. Everyone enjoys the sense of achievement and pleasure that comes with completing a large project and presenting a comprehensive, high-quality work product. Unfortunately, the pathway to the task is commonly accompanied by delayed or unfinished products, and both teammates and project supporters are dissatisfied. As the project manager, master of Scrum, or end-user, you can examine how your work progresses against the available version and give your stakeholder comments. When is an Agile Sprint Completed? Unfinished tasks can generate various issues in Software methodology, including decreased pace, lowered quality, and ongoing change around what is expected to be accomplished. To minimize this, the workforce and program stakeholders must agree on the milestones and the method they will be evaluated. When embarking on an ongoing methodology, it is critical to have a clear vision for the outcome from the start. Before starting a JIRA close sprint project, objectives, a work plan, and a full grasp of customer demands should be established. 
After that, the Agile management team turns the mission and vision into a defined set of deliveries that precisely determine when a task can be accomplished. Finally, each product is supplied with a "definition of completion," which outlines what needs to be considered done and counts for the efficiency of a process. There are several phases involved to close a sprint in the JIRA platform, either to verify the existing sprint's completion or to produce the new plan of the software program. These include the following: Step 1 Navigate to the backlog panel in JIRA. Step 2 Select the "Active Sprints" section to view all of the application's sprint planning. Step 3 Choose the sprint that you wish to close or end. Step 4 Tap the "Complete Sprint" option to close the sprint in JIRA, which will result in the deletion of all finished tasks associated with the sprint. The first step will be TO DO, followed by IN PROGRESS, and eventually DONE. Step 5 If the work is DONE, the sprint is over. Step 6 If there is an unfinished activity or issue during the sprints, you will start going back to the list or a subsequent sprint or establish a new sprint. In JIRA, sprint completion is primarily handled by the Scrum master when the Sprint timebox expires. Sprints in JIRA must be managed to close sprint in JIRA. As more firms adopted agile processes and techniques, JIRA's popularity grew exponentially. Since JIRA software provides numerous support capabilities for agile methodologies, you can begin planning, analyze and deploy the app utilizing user-friendly interfaces that are well integrated. In addition, JIRA Software provides numerous user-friendly tools for easily creating and organizing Scrum and Kanban boards for project management. JIRA training is centered on JIRA Software, the industry's leading solution for agile software development teams. Agile projects have a range of roles that are not always self-explanatory. 
Perhaps the most critical function in project management is that of the project leader. One primary task they commonly do is organized stand-ups, sprints strategy meetings, sprint assessments, and panel discussions. Additionally, they may manage administrative work on boards, such as reviewing reports and detecting roadblocks and causes of friction. The product manager is responsible for meeting client requirements and helping the team deliver value to clients. They may, however, execute critical activities like managing the backlog and assisting with task prioritization. The project leader function has significant overlap – and rather than leading the team, the product owner acts as a facilitator, ensuring the program's flow with their teammates. The system analyst also includes all the individuals who will put the program into action and lead it to a good result. Before You Proceed, Consider The Following Sprints are exclusively applicable to Scrum teams. If you're using a Kanban system, you should instead read about implementing a version. Complete a sprint only if you have the manage sprints permission. It's possible that your board's filtering is overly complicated, and JIRA would be unable to decide which tasks will be issued by the queries when you're unable to close a sprint in JIRA. To JIRA close sprint, the teammates must identify it as completed. For this, navigate to the left-hand menu, choose "Active sprints," and afterward select "Complete Sprint." This will transfer any outstanding issues from the sprints to the queue. These can be evaluated during the subsequent sprint planning. Meanwhile, the organization should make retrospective sprints. While the sprint backlog is a moment for celebrating, the retrospective is a period for constructive feedback whatever went well and what could have been done better? This enables the team to concentrate on its accomplishments and work toward ongoing improvement. 
Again, JIRA's reporting is incredibly critical in this instance. Are concerns designated too broadly in the planning phase? Or is the sprint generating an overwhelming amount of new situations? The retrospective provides an opportunity to identify problems and explore ways to improve the process to assist the team in delivering. Tasks to be Completed after a Sprint An end-of-sprint retrospective is typical. This is a discussion in which your organization analyzes the finished sprint and the tasks they now have to begin, stop, or proceed. In addition, this session should result in initiatives for future sprint upgrades. When you close a sprint in JIRA Software, you're presented with the 'Sprint Report,' which contains valuable information about the sprint during your team having a retrospective. JIRA has been created with agile consideration, which implies that you may manage the framework. However, it would be best to be designed entirely to handle your sprints via JIRA from the strategy meeting to the assessment. Learn JIRA online enables you to observe, analyze, and update the tasks you're presently working on while offering complete transparency to management regarding large-scale project accomplishments. Once you've mastered the fundamentals, installing and customizing JIRA is a breeze. JIRA course online covers the fundamental practices of drive agile techniques, equipping students with a versatile set of tools on any agile team. >4.5 ratings in Google
OPCFW_CODE
Initially I was excited about the new framework - hoping it was going to fix many of the fundamental problems with the language but I’ve been really disappointed that it continues to ignore some of the most annoying and glaring framework issues. Introspection still can’t tell you the string value of an Enum. You can’t get the full list of an enum’s values to say fill a popupmenu or convert between the two without building a bunch of helper methods. My understanding is that Xojo doesn’t build this info into the runtime code. Well why the heck not? I know saying, “Pretty much every other language does this” is not a good reason. Xojo likes to be ‘different’ but that shouldn’t mean limiting it in regards to fundamental features. The new introspection framework can’t currently get info about a class without an instance. This makes serialization classes a nightmare. Norm also informed me back in the day that deserializing multi-dimensional arrays was pretty much impossible with the current framework - then why did the new framework not address this? I understand to keep the project small they don’t include unused items. But this is also very dumb. I should be able to iterate through every project item through code. For each item as Object in Project.ProjectItems //OR at least For each style as Styles in Project.WebStyles Someone could build these helper methods themselves, but like the Enum missing methods - creating and maintaining these methods is a nightmare! If Xojo still wants to allow people to keep projects smaller by excluding unused items then maybe have an app setting for this. It by far makes more sense for people to be able to access this in code. You could then also do things like GetClassByName(“ClassName”). I understand Xojo wanting to move to the design of throwing more Exceptions earlier on. But Auto is an annoying replacement for the convenient Variant type. 
A better solution would have been to create Object Wrapper Classes for all of the primitives as well as their Nullable variants. Not having the Nullable variants makes sharing code and libraries with other people a mess. This is something so fundamental that it really should be included in the framework. I do think the Auto type is salvageable but it needs a lot more helper methods for converting it to other types. Theses methods need both a way that can throws an exception and a way that defaults to a value on exception. Once again having to build these helper (safe) methods is feasible, but it really makes sharing code a nightmare. Also on the topic of exceptions, I was also hoping they would revamp the exceptions to include additional details in the Inner Exception etc… I know that Auto-Layout is coming to the Web Version and that WebStyles etc… will be getting an overhaul. But with Xojo not giving us any roadmap details or allowing us feedback WHILE these systems are being developed, I’m legitimately concerned that these issues will not meet the expectations and needs of us as a community. (AutoLayout cannot rely on a server-side trip, it needs to be seamless on the client-side. Styles either need to be editable in code or we need to be able to apply multiple styles to controls…) There’s plenty of other things I’ve missed and new things I would love to see in the framework (Built in array extension methods that can use lambda expressions i.e. inline functions etc… ) Does anyone else have any annoyances/issues with the new framework? Sorry if this seems overly "rant"ish, I don’t intend it to. With XDC coming up next week, its a good time to bring up these topics and get a feel for what’s realistic to fix and what Xojo is willing to do. I hope to see you all next week!
OPCFW_CODE
- [Major Feature Added] LAYERS!!!!!!!! - [Major Feature Added] "File Compression" -> In comparison with the old format, files are now much smaller. But loading take a bit longer. - [Added] a new blending mode -> "AlphaBlend", which is the new default blending mode for the color channel. In opposite to normal, this mode doesn't replace the alpha transparency. To set the transparency change the blending mode to "Normal". - [Added] a "Layer"-Panel with -> Add Layer, Duplicate Layer, Move Layer Up, Move Layer Down, Separate Layer, Combine Layer Downwards, Combine All Visible Layer. - [Added] export options to export inverted roughness (smoothness for Unity users), metallicity & height. - [Added] a "Repeat Export Textures" to the "File"-Menu to directly export the textures with the last settings. - [Added] a button to the "Correction" filter to fast invert a channel. - [Added] a slider to the "Correction" filter to adjust the HUE value. - [Added] a "Layer"-Control panel on side of the materials settings to change the layer settings. Like name, color, channels blending etc. - [Added] for filters with masks (Gradient, Noise, Ambient Occlusion) a button to export the mask to a new layer, with multiplication as default blending mode on the color channel. - [Added] for baking a button to export the result into a new layer. - [Added] the slider for the mesh and UV scale to the project settings. Removed the slider from the scene tab. If the mesh is to small for paint on, it will here be easier to find. - [Changed] the file extension from "*.dat" to "*.ubpd" (Compressed). The old format is still supported for loading. - [Changed] the filter window that not needed mask options get hided for the blur and correction filter. - [Changed] wireframe, faces and islands bake generates now always a new layer. - [Changed] the color picking function -> [RMB] samples the color from the layer, [RMB] + [Alt] samples the color from all layers blended together. 
- [Changed] the filling bucket function -> [LMB] fills the current layer by sampling all layers together, [LMB] + [Alt] fills the current layer by sampling only the current layer. - [Changed] the default emission drawing setting to disabled. - [Fixed] a bug with the scrolling in the baking window and at the same time zooming out in the preview space. - [Fixed] at selection mode, while clicking in a window over the preview space, the selection was changed. - [Fixed] a bug with the layer blending modes. - [Fixed] a bug with brush blending. - [Fixed] a with asynchrone history panel. - [Fixed] a bug in the drawing Undo\Redo system. - [Known Bug] the brush preview is not correct always anymore. Will be fixed in future updates. Because the road map has changed with this Layer-Update, there must be a following second part Update with performance optimisations and missing features. So the next update plan looks as following: - Channel display to 3d viewport \ unlit mode - Layer performance optimisations - Layer edit -> Cut, Copy, Past, Move, Rotate selection from layer - Decals from selection - Blur Tool - Duplicate (Copy\Past) brush - Slider redesign for more percission values from 0.0 - 1.0 or 0 - 255 - Default Value Buttons for Slider, 0.0, 0.25, 0.5, 0.75, 1.0 Update v0.307 (2022-03-10) Here you can reply on recent published news and updates. 1 post • Page 1 of 1 1 post • Page 1 of 1
OPCFW_CODE
ubuntu-phone team mailing list archive Mailing list archive Flipped HTC Desire Z Port I've ported Ubuntu Touch with "flipped" containers (booting directly into Ubuntu) to the Desire Z. First of all, thanks to #ubuntu-touch and especially Oliver Grawert for helping me with some of the issues I encountered. Device specific code is by the Andromadus team from XDA Developers since there's no official CyanogenMod for this device. You can check the port out at , it's linked as "flipped_vision" and the page has some installation instructions / status information. There's also the "unflipped" Port with less bugs and more device-specific features by utopykzebulon. Although I have almost no experience with kernel building and low-level things, here is my general advice for porting the flipped stuff: * There are apparently two different approaches for mounting the flipped images, and it seems like my port uses the old-style approach. * The android_build repository on phablet.ubuntu.com has all the required changes to create the android zip file. It downloads files for a generic initramfs and puts them in "out/target/product/devicename/ubuntu-root". I think that changes to those files are not automatically put back into the ramdisk. (I've added an additional step to repack the ramdisk after changing the ubuntu-root/ dir contents.) That ramdisk is then used to create boot.img instead of the cyanogenmod ramdisk (which is used for android-boot.img). * The "scripts/touch" file in the ramdisk needs to figure out the data partititon's device file name (like /dev/foo). It might fail for your device and cause the boot process to fail. I've hard-coded the device path for the Desire Z for now, maybe we can set this in the device config at build time later. * If the initramfs script has problems finding the data partition, then /usr/lib/lxc-android-config/update-fstab could have problems too. * /etc/init/lxc-android-boot.conf might be interesting as well. 
* The Ubuntu rootfs needs a udev rules file for your device. Check the /usr/lib/lxc-android-config/70-*.rules files as an example. You can create that file by looking at the ueventd*.rc files for your device from cyanogenmod and transforming the /dev/ settings to udev syntax. * There are some requirements for the kernel config. For example, I had boot failures before I enabled CONFIG_VT=y and CONFIG_VT_CONSOLE=y. * It's a good idea to check dmesg, logcat (you need to android-chroot first) and the various log files in /var/log/upstart if there are problems.
OPCFW_CODE
18-441/741: Computer Networks Project 2: Content Distribution Content in the Internet is not exclusive to one computer. For instance, an episode of your favorite sitcom on Netflix will not be stored on one computer, but a network of multiple machines. In this project, you will create an overlay network that connects machines that replicate common content. You will implement a simple link-state routing protocol for the network. We could then use the obtained distance metric to improve our transport efficiency. 2. Project Tasks Your main goal is to create a program ./contentserver. You should feel free to implement contentserver in C or Java but include a makefile/build.xml to ensure we can correctly compile the code. After initial configuration, ./contentserver needs to wait for additional input via the keyboard. 3.1 Configuration File Your node should be able to read a configuration file (by default) “node.conf” located in the same directory in which we execute the server. Alternatively, you should be able to take any configuration file provided with –c option when we execute the peer process (e.g. with a command-line ./contentserver –c node.conf ) Here is an example of a node.conf: 2 Computer Networks| 18-441/741 uuid = f94fc272-5611-4a61-8b27-de7fe233797f name = node1 backend_port = 18346 peer_count = 2 peer_0 = 24f22a83-16f4-4bd5-af63-9b5c6e979dbb,pi.ece.cmu.edu ,18346,10 peer_1 = 3d2f4e34-6d21-4dda-aa78-796e3507903c,mu.ece.cmu.edu ,18346,20 These are the optional details. You could assume that the configuration file will be well-formatted. However, all fields are optional. 
If the uuid option does not exist, you must generate a new UUID (see 3.1) and update the configuration file (either node.conf or those specified in –c parameter) with the – uuid – The node’s unique identifier (see 3.1, default: not-specified) – name – A user-friendly name used to call the node – backend_port – Back-end port of the node (UDP-Transport backend, default:18346) – active_metric, extended_neighbors – (see extra credit, default: 0) – peer_count – the number of neighbors specified in this configuration (default: 0) This option will follow by exactly peer_count lines providing peer information. – peer_x = uuid,hostname(or ip address),backend port,distance metric A comma-separated value listing initial neighbors for this node and their distance metrics 3.2 Node Identifier Each peer node should have a unique identification. A Universally Unique Identifier is a 16-byte (128-bit) number, which could be used to uniquely, identified information without the need of central coordination. The UUID should be generated upon the creation of each peer node. It should be persisted in the configuration file so that we can identify the same node across restart. You could execute ‘uuidgen’ command to generate a new UUID. For this project, you could use java.util.UUID.randomUUID or uuid_generate/uuid_unparse provided by libuuid (include /usr/include/uuid/uuid.h and compile with -luuid). The libraries are available on the cluster. Response: the uuid of the current node, e.g: You should add additional message to your backend; A Keepalive message. The purpose of such message is to notify your neighbor that you can still reach them. For example, each node could send out a keepalive message to its neighbor every 10 seconds. A neighbor could be declared as unreachable if we miss three consecutive keepalive messages. 本网站支持淘宝 支付宝 微信支付 paypal等等交易。如果不放心可以用淘宝交易! E-mail: [email protected] 微信:itcsdx
OPCFW_CODE
# 101803503 Shreshth Arora
"""Command-line TOPSIS (Technique for Order Preference by Similarity to
Ideal Solution) ranking tool.

Usage:
    topsis data.csv "1,1,1,2" "+,+,-,+" result.csv

The first column of the input CSV names each alternative; every other
column is a numeric criterion.  Weights and impacts are comma-separated
and must match the number of criterion columns.  Results (the original
data plus 'Topsis Score' and 'Rank' columns) are written to the result
CSV and echoed to stdout.
"""
import math
import numbers
import sys
from os import path

import pandas as pd


def validate_input_file(data_file):
    """Exit with a message unless data_file is an existing, readable CSV
    with at least 3 columns whose non-name columns are entirely numeric.

    Returns 1 on success (the script treats any return as "valid")."""
    if not (path.exists(data_file)):
        print(" 🛑 File doesn't exist")
        exit(0)
    if not data_file.endswith('.csv'):
        print("🛑 CSV is the only supported format")
        exit(0)
    try:
        input_file = pd.read_csv(data_file)
    except Exception:
        print("🛑 Error Opening File")
        exit(0)
    col = input_file.shape
    if not col[1] >= 3:
        print(f"🛑 {data_file} should have 3 columns ")
        exit(0)
    k = 0
    for i in input_file.columns:
        k = k + 1
        if k == 1:
            # First column holds alternative names, not numbers.
            continue
        for j in input_file.index:
            # numbers.Number also accepts NumPy scalar types
            # (numpy.int64, numpy.float64).  The original
            # isinstance(..., int) check wrongly rejected integer
            # columns, which pandas loads as numpy.int64 (not a
            # subclass of Python int).
            if not isinstance(input_file[i][j], numbers.Number):
                print(f'Value is not numeric in {k} column')
                exit(0)
    return 1


def validate_result_file(data_file):
    """Exit unless the result path ends in '.csv'; return 1 otherwise."""
    if not data_file.endswith('.csv'):
        print("🛑 CSV is the only supported format for result files")
        exit(0)
    return 1


def validate_weights(data_file, weights_str):
    """Parse the comma-separated weights string into a list of floats.

    Each token may contain only digits and at most one decimal point.
    Exits if a token is malformed or if the number of weights does not
    equal the number of criterion columns in data_file."""
    input_file = pd.read_csv(data_file)
    col = input_file.shape
    weight = []
    for token in weights_str.split(','):
        dots_seen = 0
        for ch in token:
            if not ch.isnumeric():
                # A single '.' is allowed; anything else is an error.
                if dots_seen >= 1 or ch != '.':
                    print("🛑 Weights not in Correct Format")
                    exit(0)
                else:
                    dots_seen = dots_seen + 1
        weight.append(float(token))
    if len(weight) != (col[1] - 1):
        print(f"🛑 No. of Weights should be same as no. of columns in {data_file}")
        exit(0)
    return weight


def validate_impacts(data_file, impact_str):
    """Parse the comma-separated impacts string into a list of '+'/'-'.

    Exits if any token is not '+' or '-' or if the count does not match
    the number of criterion columns in data_file."""
    input_file = pd.read_csv(data_file)
    col = input_file.shape
    impact = impact_str.split(',')
    for i in impact:
        if i not in {'+', '-'}:
            print(f"🛑 Only \" + \" or \" - \" are allowed not {i}")
            exit(0)
    if len(impact) != (col[1] - 1):
        print(f"🛑 Columns in {data_file} and Impacts should be Equal in No.")
        exit(0)
    return impact


def input_matrix_normalized(data_file):
    """Return the decision matrix with each criterion column divided by
    its root-sum-of-squares (vector normalisation)."""
    data_frame = pd.read_csv(data_file)
    columns = list(data_frame.columns)[1:]  # skip the name column
    for col in columns:
        root_sum_square = math.sqrt(
            sum(data_frame[col][row] ** 2 for row in data_frame.index))
        for row in data_frame.index:
            data_frame.at[row, col] = data_frame[col][row] / root_sum_square
    return data_frame


def matrix_normalized_weighted(matrix, weights):
    """Multiply each criterion column of the normalised matrix by its
    weight.  NOTE: mutates and returns the frame passed in (the original
    aliased rather than copied; preserved for identical behavior)."""
    matrix_weighted = matrix
    columns = list(matrix_weighted.columns)[1:]
    k = 0
    for col in columns:
        for row in matrix_weighted.index:
            matrix_weighted.at[row, col] = weights[k] * matrix_weighted[col][row]
        k = k + 1
    return matrix_weighted


def matrix_best_worst(matrix, impacts):
    """Return (best, worst) ideal values per criterion column.

    For a '+' (benefit) impact, best is the column max and worst the
    min; for '-' (cost) the roles are reversed."""
    columns = list(matrix.columns)[1:]
    best = []
    worst = []
    k = 0
    for col in columns:
        if impacts[k] == '+':
            best.append(max(matrix[col]))
            worst.append(min(matrix[col]))
        else:
            best.append(min(matrix[col]))
            worst.append(max(matrix[col]))
        k = k + 1
    return (best, worst)


def euclid_dist(matrix, best, worst):
    """Return (s1, s2): per-row Euclidean distances to the ideal-best
    and ideal-worst vectors respectively."""
    columns = list(matrix.columns)[1:]
    s1 = []
    s2 = []
    for row in matrix.index:
        ideal_best_sum = 0
        ideal_worst_sum = 0
        k = 0
        for col in columns:
            best_diff = best[k] - matrix[col][row]
            worst_diff = worst[k] - matrix[col][row]
            ideal_best_sum = ideal_best_sum + (best_diff ** 2)
            ideal_worst_sum = ideal_worst_sum + (worst_diff ** 2)
            k = k + 1
        s1.append(math.sqrt(ideal_best_sum))
        s2.append(math.sqrt(ideal_worst_sum))
    return (s1, s2)


def topsis_final_ratio(data_file, s1, s2):
    """Append 'Topsis Score' (s2 / (s1 + s2); higher is better) and its
    integer 'Rank' (1 = best, ties share the lowest rank) to the raw
    input data and return the frame."""
    frame = pd.read_csv(data_file)
    # Renamed from 'sum' in the original, which shadowed the builtin.
    score = [s2[i] / (s1[i] + s2[i]) for i in range(len(s1))]
    frame['Topsis Score'] = score
    ranks = pd.Series(score).rank(ascending=False, method='min')
    frame['Rank'] = ranks.astype('int')
    return frame


def main():
    """Validate CLI arguments, run the TOPSIS pipeline, and write the
    ranked matrix to the result CSV."""
    if len(sys.argv) < 5:
        print('🛑 Provide 4 parameters in this manner: '
              'topsis data.csv "1,1,1,2" "+,+,-,+" result.csv')
        exit(0)
    if len(sys.argv) > 5:
        print('🛑 Cannot pass more than 4 parameters ')
        exit(0)
    INPUT_FILE_STR = sys.argv[1]
    WEIGHTS_STR = sys.argv[2]
    IMPACTS_STR = sys.argv[3]
    RESULT_FILE_STR = sys.argv[4]

    # Validating string input to the program
    if validate_input_file(INPUT_FILE_STR):
        print(f"✅ {INPUT_FILE_STR} in correct format")
    WEIGHTS = validate_weights(INPUT_FILE_STR, WEIGHTS_STR)
    if WEIGHTS:
        print(f"✅ {WEIGHTS_STR} in correct format")
    IMPACTS = validate_impacts(INPUT_FILE_STR, IMPACTS_STR)
    if IMPACTS:
        print(f"✅ {IMPACTS_STR} in correct format")
    if validate_result_file(RESULT_FILE_STR):
        print(f"✅ {RESULT_FILE_STR} in correct format")
    print(f"📝 Generating Results and saving to {RESULT_FILE_STR} ... ")

    # Main TOPSIS algorithm
    MATRIX = input_matrix_normalized(INPUT_FILE_STR)
    MATRIX_WEIGHTED = matrix_normalized_weighted(MATRIX, WEIGHTS)
    (BEST, WORST) = matrix_best_worst(MATRIX_WEIGHTED, IMPACTS)
    (S1, S2) = euclid_dist(MATRIX_WEIGHTED, BEST, WORST)
    TOPSIS_RANKED = topsis_final_ratio(INPUT_FILE_STR, S1, S2)
    print('\033[1m' + 'Final Matrix:' + '\033[0m')
    print(TOPSIS_RANKED)
    TOPSIS_RANKED.to_csv(RESULT_FILE_STR, index=False)


if __name__ == "__main__":
    main()
STACK_EDU
Automate container security with Dockerfile pull requests Integration with your source code managers and issuing pull requests to fix issues has been part of Snyk’s success in helping our customers fix application dependencies for several years. Now, we want to help you address container security in a similar way. We’re happy to share that we are extending Snyk Container by helping you automatically fix issues in your Dockerfile to keep an up-to-date base image at all times. We’ve recently started bringing Dockerfiles to the forefront in Snyk by detecting them automatically from git repos, surfacing their base image vulnerabilities, and advising you on base images you can upgrade to in order to decrease the number of security issues. Now, we want to take it to the next level and do the fix part for you. Starting today, Snyk provides your choice of automatic or manual creation of pull requests containing the needed change in your Dockerfile to use an alternative, less vulnerable base image. This automation continues to improve your team’s efforts in handling large numbers of container vulnerabilities by not only pointing out the container issues early, but also making it simple and fast to mend them as well. Automated code-level security fixes for containers Up until now, Snyk provided base image recommendations you could elect to use. In the example shown below, we’re using node:10.4.0 as the base image, and Snyk Container shows us that it contains 956 vulnerabilities, out of which 453 are of high severity (as of the time of this writing). For this image, based on data gathered by our security teams, we would recommend using a different base image to lower the security risk. In this example, one of the alternatives will be a minor upgrade to node:10.23.2, which has only 64 high severity vulnerabilities. The new fix PRs allow you to select other upgrade options, if you prefer, but we also enable fix PRs to be automatically created for minor upgrades. 
The new process consists of detecting your Dockerfile and providing you with base image recommendations following the below logic: - Minor upgrade – the safest and best upgrade that does not drastically change the framework versions (Node in our example) - Major upgrades – an option for a major version upgrade that will reduce more vulnerabilities, but with potentially greater risk of application incompatibility due to the major version change - Alternative upgrades – viable alternative image options for replacing your current base image with other, typically slimmer base images that provide an even greater reduction in vulnerabilities, but due to their slim nature they could quite possibly require some additional steps to use Automated pull requests will be issued for you to review and merge only for the latest minor upgrade base image version. Should you choose to manually open a pull request, you can do so and update to any alternative base-image from our recommendations mentioned above. These fix PR features are available now to all Snyk Container users on all plans, including free accounts. Snyk Container provides base image recommendations and automatic pull requests for Docker Official Images. This curated set of Docker repositories on Docker Hub are designed to provide essential base images that serve as the starting point for the majority of container users and cover a wide range of popular programming language runtimes, data stores, and other services. As part of the Docker Official image program, Docker sponsors a dedicated team that is responsible for reviewing and publishing all content in the official images and this team ensures that security updates are applied in a timely manner. This is particularly important as Official Images are some of the most popular on Docker Hub. 
We do have plans to expand beyond Docker Official images and would love your feedback on other sources upstream images you’d like us to cover as well as how you curate your own internal custom images, if that’s part of your process. The new capability is available via the Snyk web UI and API and can be easily configured. We support all git-based repository managers in Snyk, including: - Bitbucket Server - Bitbucket Cloud - GitHub Enterprise Server Refer to our documentation for further details. Let us know what you think So now you can settle in and let Snyk fix container security issues for you by helping maintain your Dockerfiles with an up-to-date base image. The only thing left for you to do is merge and keep safe! We are excited to hear what you think so we can keep on improving your experience. If you have any feedback or specific use cases you think should be addressed, let us know.
OPCFW_CODE
With the work going on to draw into tiled areas to correspond to the tiled texture that fronts it, it makes sense that we draw on demand, rather than having the drawing driven by Gecko in response to events sent. Doing this would allow us to more intelligently retain tiles of the page, and to respond quicker to movement/do less unnecessary drawing. It would also allow us to more easily interrupt drawing, in the situation that we can't keep up with what's being drawn on the screen, and should make viewport prediction (bug 729528) more efficient. For the record, we attempted something similar when using the Java compositor, in bug 716581. I think there are a few ways we can go about this, here's one: - Separate layer size from buffer size - Manually size the display-port to the size of the page - Rename 'draw' events to 'invalidate' events, and just record the invalid region in layers - Add interfaces necessary to request tiles be drawn via the CompositorParent (<-- massive simplification!) - Augment the existing progressive texture upload code (bug 732917) to drive drawing as well as uploading tiles - Optional: Improve the request interface and progressive upload code to prioritise tiles The progressive tile upload code is already interruptable. We may use the current window size to pre-render some tiles before they're requested(?) Another (maybe better, maybe harder) way of going about it would be to add a flag that would mark a frame 'boundless', somehow? (the equivalent of manually setting the displayport to the page size) This would make the display-port sizing part a lot less delicate. Ideas very much welcome, especially from people that know layout and can point out the stupid assumptions I've probably made. I don't yet understand what you're proposing, especially what happens on the main thread vs the compositor thread. Are you talking about doing tile uploads on the main thread synchronously with drawing? 
(In reply to Robert O'Callahan (:roc) (Mozilla Corporation) from comment #1) > I don't yet understand what you're proposing, especially what happens on the > main thread vs the compositor thread. Are you talking about doing tile > uploads on the main thread synchronously with drawing? No, though that'd certainly be nice (and will sort-of be the case when tiles are gralloc-backed). I'm proposing the main thread will not do any drawing until asked to by the compositor thread - Instead, it will just keep track of what tiles it's been asked previously to render, and it will invalidate them as necessary. I don't fully understand what's being proposed either, but gecko has to drive drawing in many common cases like CSS animations/transitions, animated gifs, canvas, video, etc. Round-tripping through the compositor adds latency. This is the approach that was taken with fennec 2.0 and it worked badly for the examples above. But again, don't fully understand the details here. As a side note, it's very easy for us to interrupt drawing by having a flag in shmem that the compositor sets and our drawing code reads in critical loops. BenWa - how is this related to the tile work you are doing? tracking-fennec: ? → - You need to log in before you can comment on or make changes to this bug.
OPCFW_CODE
Unity: How to manage a large amount of prefab variations? The project that I'm working on demands the ability for game designers to be very flexible when balancing, customizing items, etc. The most difficult requirement to meet at the moment is that they want to drag-drop, say, a weapon prefab onto a weapon field in some game entity component and from there be able to tweak the prefab's parameters. If I provide some tools that create derived prefabs at such moments, then we'll end up with possibly hundreds of pretty much same prefab variations and also loose the ability to edit the initial prefab and transfer the changes to derivatives. What I'm starting to consider is introducing a "Data Map" structure in every entity that requires high level of parameter customization. Every parameter would be set through methods like Set/GetFloat("key", "value"), Set/GetFloatArray, Set/GetInt, etc. So basically a dictionary that maps a string to a predefined set of data types. When a prefab of such entity would be drag-dropped into a field of another component, an editor tool would separate the "Data Map" from the prefab, allow the game designer to tweak the values of the keys and eventually serialize it to json. During runtime uppon instantiation of a prefab with a "Data Map" a manager would check the deserialized overrides and override the "Data Map" with the tweaked values if needed. Though to me this sounds overly complicated and maybe some of you had a similar situation and came up with a simple solution? Maybe it's worth trying the ECS approach? Thanks. An example: Let's say there are prefabs: Axe, Sword, Bow with a bunch of parameters configured, say Damage, Durability, etc. There are many other components that can accept a weapon prefab and once it's dropped, the designers want to have "on the spot editing" say, okay, for this situation here the Axe prefab needs to be a little different and have Damage+3. 
Creating a new prefab means that the link to the initial Axe prefab is lost and there are now two Axe prefabs that need to be managed - this is the problem we're trying to address. You can go with object pooling The problem is not instance creation/management, but how to provide a way for game designers to do small/minor tweaks of prefabs during edit mode and where/how to store such changes and then apply to instances of prefabs during runtime (instead of creating new prefabs for every tweak). Then you can add that prefab in a seperated pool possible solution http://forum.unity3d.com/threads/replace-game-object-with-prefab.24311/ or this http://answers.unity3d.com/questions/22530/changing-the-prefab-an-object-is-associated-with.html Can you say a bit more about what traits you hope to preserve via the link to the "initial axe prefab"? Is it data values (damage, etc.), or is it the rest of the prefab junk, meshes, components, etc.? In your ideal case, if e.g. someone makes AwesomeAxe with +3 damage and later changes the base axe to have 2 more damage, should AwesomeAxe be +5? I'm trying to understand the constraints here. Only the data values should be tweakable after the drag-drop and the rest of the prefab junk should stay as in the "initial prefab" (and also be transfered if changed in the initial). Regarding data changed on the "initial prefab" - if a field was tweaked/overrided in the derived prefab, it should NOT transfer, but should transfer if it wasn't touched. For example, say you drag-dropped an Axe that has two data fields: damage(5) and durability(3). You tweak the derived prefab to have damage+3 and after that you change the initial prefab to have damage(1): the initial prefab should have damage 1 and the derived should have 8. But if, after the drag-drop, you first go and change the initial prefab's durability to 5 and then go to derived prefab it should also show 5 (not 3). 
Though I'm still not 100% clear on what your designers need, here's one approach that should get you easy customization while maintaining prefab connections. The limitation is that you still need unique prefabs for each distinct thing you're going to store outside of a scene. Core concept is either the "Decorator" pattern, or maybe the "Mutator" pattern. Either way I'll call them Mutators. First, you need a base class that will be modified. This class has the various values you want to allow designers to set. public class WeaponBase : MonoBehaviour { // values designers set directly *tied to the prefab* public float BaseDamage = 1.0f; public float BaseDurability = 10.0f; public int handsRequired = 1; // cached values built up from mutators below this object private float modDamage = 0.0f; private float modDurability = 0.0f; void Start () { WeaponMutator[] mutators = GetComponentsInChildren<WeaponMutator>(); for(int i=0; i<mutators.Length; i++) { modDamage += mutators[i].getModDamage(); modDurability += mutators[i].getModDurability(); handsRequired = Mathf.Max(handsRequired, mutators[i].getHandsRequired()); } } // update cache values in Update() if you want them to be changeable real-time. } Then, you need a 'mutator' script, which gets placed on a "CustomizeWeapon" prefab somewhere. In that script, you store values: public class WeaponMutator : MonoBehaviour { // values designers set directly *in the scene hierarchy* public float ModDamage = 0.0f; public float ModDurability = 0.0f; public int ModHandsRequired = 1; public float getModDamage(){ return ModDamage; } // ditto for getModDurability, getHandsRequired, etc. } (These could be merged, but I think to make the purpose clear for your designers, keeping them separate is helpful.) Then for each core weapon type you do want individual prefabs for, you can use just the WeaponBase script if that's sufficient. If not, you can do WeaponMutators to define it as you need. Your prefabs are then fairly simple. 
A standard 2-handed axe prefab might be: Axe (contains all the generic axe data in a WeaponBase instance) 2H (GameObject with a WeaponMutator, just defines handsRequired as '2') Then in your scene if the designers want to make some guard's 2H axe really strong, they could just drag-drop your WeaponMutator prefab under 2H, rename it, give it values: Axe 2H Modifier NatalyaBuff (another WeaponMutator with ModDamage = 5.0f) (Bolded to show which part is still linked to a prefab.) There are of course many variations on this style of composition, and what I describe here is standard "fits in a StackOverflow answer" clunky. If I were implementing this myself, I would probably build this using lists of ScriptableObjects in place of Mutator prefabs, perhaps something like what's described here. But it depends a lot on what kind of UX your designers will be happy with. Hope this gets you headed in the right direction. Thanks for those two documentations though. :) "Decorator" pattern and "Mutator" pattern Thanks for this idea. Why are you iterating over mutators in both WeaponBase and WeaponMutator? I assumed only WeaponBase needed to iterate, leaving WeaponMutator to just expose the various modifier-values. Good catch! When I wrote this I apparently forgot that GetComponentsInChildren recurses for you. I've edited the example code to reflect this. I think in a more robust solution, I would instead have each layer get only its direct children, to allow for an ordered series of modifications. Since ModDamage here is only adding, never multiplying, order of operations doesn't matter. However, not allowing multiplicative tweaks for an RPG seems very strange. (For example) Have a script named PrefabDatabase. Define an Enum called PrefabType that covers all your prefabs. Then define a Dictionary< PrefabType, GameObject >. Assign all your prefabs to this dictionary. There is an issue here though. 
You can't initialize dictionaries in inspector; here comes the solution: [System.Serializable] public struct EnumPrefabPair { public PrefabType type; //For "TL;DR"ers, this is an enum that has a value for all prefabs... public GameObject prefab; //...and this will be the prefab referance. } public EnumPrefabPair[] typesAndReferances; Initialize this array in inspector and then in your Awake(), run a foreach loop on it to pass values to your dictionary. Then you can simply referance your prefab by enum value. What you can do is just replace the current prefab with a new prefab for that moment.for an example var prefab1 : GameObject; var prefab2 : GameObject; // Prefab to use private var prefab : GameObject; function Start () { prefab = prefab1; } function Update () { // Use the first prefab when pressing 1 and the second prefab when pressing 2.. if ( Input.GetKeyDown ("1") ) prefab = prefab1; if ( Input.GetKeyDown ("2") ) prefab = prefab2; if ( /* Something happens ?? */ ) Instantiate (prefab, transform.position, Quaternion.identity); } Referred from : link No we can't. We'll have at least a 100 of item prefabs. If we start to create additional prefabs for every "derivative prefab" this will become extremely tedious. We don't want prefabs like AwesomeBow, AwesomeBowWithABitMoreDamage, AwesomeBowWithABitLessDurability, we only want to have AwesomeBow prefab and a mechanism that would allow to introduce small parameter changes on the editor side and transfer those changes to realtime. Sorry but you are misreading the problem. say, 100 of axes, granades, guns, etc. And whenever a game designer drops one of these in a prefab field of a component, they want to be able to tweak it's parameters if needed. For example, they drop one of the 100 items, say SuperGranadeLauncher and they want that it's parameters, "reload speed" if dropped onto CharacterX be 2.0f, but if drooped onto CharacterY be 3.0f, oh and also if on CharacterY the damage should be +2. 
But they don't want to have additional prefabs like SuperGranadaLauncherWithFasterReloadForCharX, etc. can't imagine any other solution rather than changing animation(using animator controller) and this solution though. :/ Isn't there any possibility use animator controller?This is very useless though.Because Animator controller is the easiest way,next is this. Chris Mills provided the right answer
STACK_EXCHANGE
Santiment, the market intelligence and data platform, recently analyzed blockchain development activity. It then produced a list of the 10 crypto projects that were the most active in terms of development in the last 30 days. Discover the incredible performance recorded by the smart contracts platform Cardano (ADA). Cardano tops them all in terms of development activity! The Cardano blockchain tops the list of the top 10 crypto projects with the most outstanding development activity on GitHub, according to Santiment. The market intelligence and data platform said: "Cardano is head and shoulders above all other digital assets in development activity". It appears that several GitHub pull requests have been submitted to Cardano in the last 30 days. Santiment suggests that the blockchain recorded 18% more development activity on GitHub than Polkadot (DOT). In this regard, it also overtook the second largest crypto project by market capitalization, Ethereum. In fact, Polkadot (DOT) comes second to Cardano (ADA). Then there is Kusama (KSM), Cosmos (ATOM), Ethereum (ETH), Internet Computer (ICP). In the rest of the ranking, there is Status (SNT), Decentraland (MANA), Filecoin (FIL) and Vega Protocol (VEGA). So developers believe in the potential of the blockchain? Santiment explained that a project that registers strong development activity can be said to be promising. It specifically said: "Developer time is a relatively expensive resource and if many developers are devoting their time and skills to a given project, it could mean a number of things." "These people believe that the project will succeed. The project offers more features. It is less likely that the project is just an exit scam," Santiment added. The high rate of development activity on Cardano could therefore indicate that the developers believe in the success of the crypto project. It could also be that this blockchain has several functions. 
This is at least the second time that the Cardano blockchain has been ranked as the best crypto project in terms of development activity. In fact, in 2021, it had the most GitHub commits among crypto projects, according to Santiment. We can therefore say that the smart contracts platform Cardano has real potential — this, if only for the developers. Get a summary of the news in the world of cryptocurrencies by subscribing to our new daily and weekly service so you don’t miss any of the essential Cointribune! Far from dampening my enthusiasm, an unsuccessful investment in cryptocurrency in 2017 only increased it. So I decided to study and understand the blockchain and its many uses, and to use my pen to pass on knowledge related to this ecosystem.
OPCFW_CODE
Level-triggered epoll_wait() on gdb's MI output through pipe doesn't notify the existence of the "(gdb)\n" line In an application, I spawn gdb and connect its stdout (and others) to pipes. I then epoll_wait on this pipe (and others) to be notified of when there's a response from gdb. Every time epoll_wait wakes up with a positive return value (there is an fd to read from), I read one line from gdb's stdout pipe (if that's the fd with an event), and go back to epoll_wait. This all works fine, except sometimes the last line of gdb's response (which is invariably "(gdb)\n") is not read, and epoll_wait returns 0 forever. If I wait a few seconds, and then read from gdb's stdout pipe despite the fact that epoll_wait is returning 0, I can receive the "(gdb)\n" line. What's going on? That data is clearly in the pipe ready to be read from, but the level-triggered epoll is not generating an event for it. Some notes: The pipe connected to gdb's stdout is created with O_NONBLOCK. epoll_create1 is called with EPOLL_CLOEXEC (and nothing else), i.e. it's level triggered. I use GNU getline() to read a line After every call to getline(), I clearerr() the fd of the pipe (I do this because in a test application I noticed if EOF is reached (because the other end of the pipe hadn't finished writing the whole line before I read it), stdio-based functions get stuck thinking EOF is reached. I can handle reading the line chunk by chunk, so this is fine. I also tried removing the call to clearerr() to no effect) If I add a one second delay after every line read and before epoll_waiting again, epoll_wait will immediately return the stdout fd for every line of the initial version+license message, but still not for the last "(gdb)\n" line. I suspect that the issue is the buffering that the C standard library does. 
Here's my guess as to the timeline of events: You call getline getline calls read Two lines are available in the pipe, and read returns both of them getline gives you the first line and buffers the second one You call epoll_wait epoll_wait blocks, since the kernel sees there's no data left in the pipe epoll_wait times out a few seconds later You call getline again getline gives you the second line that it buffered earlier The core problem is that as far as the kernel is concerned, there's no difference between data in the userspace stdin buffer and data that you've already read and processed. To work around the problem, never give the FD to any functions that wrap it in a FILE *, and do all of the reading yourself with the read syscall directly. Ah that makes sense. I still can't explain why if I add a delay of 1s between epoll_waits, I still get an event for every line of gdb output (it initially outputs like 20 lines), but perhaps that has just to do with the last line being so short. You're right, I should forego reading through FILE *. Thanks
STACK_EXCHANGE
Future-proof way to detect iPhones with a notch (like the iPhone X) I have an application that I want to go full screen (hiding the status bar) on iPhones that do not have a notch, but stick to the safe area (keeping the status bar visible) on iPhones that have a notch, like the iPhone X. I cannot just restrict my application to the safe area on all devices, since the safe area includes the status bar on iPhones that do not have a notch. The approaches I can think of are: Checking the device model, or Looking at the safe area insets for the main UIWindow and trying to infer something from the actual values (e.g. "if top inset is >22, then it has a notch") None of the above is very clean, not future-proof. As of now, doing the calculation on the basis of status bar height and top inset area is the only way to determine the notch in a more accurate way. Because if there is a notch then definitely, the status bar will have a bigger height then normal. In the future, the more chances are that Apple will remove the notch to make more room. In this, the status bar height will be back to normal for sure because there will be no reason to make it bigger. What you say? It is very unlikely that you will be able to build a future-proof version of this. No one expected the notch before it showed up. We won't expect the next twist either. How will your system deal with a second notch on the bottom edge, or a differently-shaped notch, or extra insets from the side? In my experience, follow Apple's advice as closely as you can (using Storyboards for example, which usually auto-adapt better than custom code). In this case, their advice is likely "don't adapt this way." But accept that you'll have to update UI for new devices, & design so that's as easy as possible. (BTW, when I say Apple's advice is "don't adapt this way," you might say "but Apple violates that kind of advice all the time." And the answer is, "yes, they do." 
Been screaming about that since at least when iTunes broke all the rules about how window frames are supposed to work. Yeah. Apple breaks their rules all the time…) All that said, your "top inset > 22" is probably the best answer not in that it means there's a notch, but that there is enough room for the information (or as close as you can get to answering that actual question). I was not able to find any "future-proof" way to do this, so I am finally relying on the safe area insets. One important note, though, is that this should not be done by checking the top inset alone, as the top inset may vary, for example, when the in-call status bar is being shown. It is better to check either the bottom inset or both bottom and top.
STACK_EXCHANGE
How to add support of multi-line arguments for Python scripts using argparse? I've just created a Python project that uses argparse for parsing arguments. But it seems that it does not support multi-line arguments. One can check the example/command-line.sh in the project, and will find it does not understand the following style #!/bin/bash ../scripts_gen.py --template template.txt \ --save-to scripts \ --param "{'data':'datasets.txt', \ 'lambda':[`echo 0.{0..9}|sed -E 's/\s+/,/g'`], \ 'seed':[233,874]}" \ --format "{data}_lambda={lambda}_seed={seed}.sh" \ --delete Note that this is legal in shell script: one can always write $ ls -l \ > -f in console or shell script file (no prompt in this case). So can I add support for this via argparse? Thank you. This should be OK. What error do you see? You might not need the \ within your quoted string. @SethMMorton In the above example, I will see the Python script can recognize the first argument --template but ignoring the rest. Then the shell later complains that it cannot find commands like --save-to etc.. The problem is in your bash script. If it worked as intended, python wouldn't even know there's a newline between arguments--the shell handles that and removes the newline. Most likely there's a space after the first backslash. @alexis I was using Cygwin but later tried Debian 7.0 and it seems OK. It might be a problem of Cygwin. But Cygwin can recognize the ls example, so it's still a little weird. Ok, if the same file works properly on Debian, then it's not a space after the backslash. Perhaps it's a problem with the line endings in your script? @alexis Yes I think this is the reason. Since git will determine whether to use CR/LF or LF automatically, the pulled script will use CR/LF under Windows, which is not supported by Cygwin by default. The script runs normally after dos2unix. I've also found a thread about this Cygwin's default behavior. http://comments.gmane.org/gmane.os.cygwin/115223 . 
Anyway, could you post your comment as an answer? There you go. With some additional comments. Post-facto answer, based on my comments: The problem is in your bash script. If it worked as intended, python wouldn't even know there's a newline between arguments--the shell handles that and removes the newline. Most likely there's a space after the first backslash. But since the same file works properly on Debian but fails under cygwin, it was not a space after the backslash. Perhaps it's a problem with the line endings in your script? So, the problem was automatic CR/LF conversion by git, combined with a strange refusal by cygwin to understand the line-ending conventions of its host operating system. Though you fixed it by hand-converting the script back to unix line endings, I would recommend a more robust solution: You could enable CR/LF support in cygwin (since you imply that it is an option), but my preference would be to also disable git's CR/LF mapping. And check that all your common tools and editors are configured to handle both kinds of line ending properly.
STACK_EXCHANGE
Feature/sqzhang/ldas coup I believe this is the branch @saraqzhang referred to in #84. Will test according to GEOSgcm_GC guidelines @sdrabenh @lltakacs , @saraqzhang's Land increment code has passed 0 diff tests for LDAS and GCM. My tests of this branch for AMIP, Regular Replay, 0-Increment Replay, and Regression (for Old and New Land) are at: /discover/nobackup/projects/gmao/g6dev/jperket/tests/JPTest_Test_NL_2019-10-08 /discover/nobackup/projects/gmao/g6dev/jperket/tests/JPTest_Test_OL_2019-10-08 (Controls I used are: /discover/nobackup/projects/gmao/g6dev/jperket/tests/JPTest_Develop_NL_2019-10-08 /discover/nobackup/projects/gmao/g6dev/jperket/tests/JPTest_Develop_OL_2019-10-08 ) @wmputman please let me know your input. I verified zero-diff results for restarts and standard history collections. With the transition now to GIT it will require that we formalize some processes for getting large code updates onto develop. With large code changes like this we need to have a code summary and review with appropriate attendance from the relevant gatekeepers. For this request we need at leas t @wmputman @JustinPerket @saraqzhang @sdrabenh and @lltakacs at the meeting. I'd like to target Monday afternoons for these code meetings. I will send a meeting request for 12:30pm Oct 21. @gmao-rreichle may also want to be at this meeting @saraqzhang @JustinPerket The CircleCI failed for a useful reason! Huzzah! It failed because of this code: https://github.com/GEOS-ESM/GEOSgcm_GridComp/blob/94221062a693f6d1bb0c566702fc3bf5f5660290/GEOSagcm_GridComp/GEOSphysics_GridComp/GEOSsurface_GridComp/GEOSland_GridComp/GEOScatch_GridComp/GEOS_CatchGridComp.F90#L2821-L2828 What you should do is change this to: IAU_NAMES = [character(len=11) :: "TCFSAT_IAU", "TCFTRN_IAU", "TCFWLT_IAU", & "QCFSAT_IAU", "QCFTRN_IAU", "QCFWLT_IAU", & ... where the len=11 is the length of the longest string in the constructor. (I think it's 11, but I might have counted wrong.) 
The reason this failed is that gfortran assumes all members of an array constructor are as long as its first member or shortest member. Something like that. So the first is 10 characters long, but eventually it sees a 9-character (or 11-character) and fails. I think @JustinPerket can fix this quickly in browser. Also, if you do, please use the [ ] version of constructor. It's just nicer. :smile: Passed! Thanks @JustinPerket good for me to learn about CircleCI failed reason. thanks! @sdrabenh, I would say the commits after your tests ( dd89651 to 7c1b607d5 ) are trivially 0-diff. And they are wrapped in the If statement controlled by @saraqzhang's LDAS_IAU flag, which is off by default anyways. Is there any further testing to do, assuming the CircleCI tests pass?
GITHUB_ARCHIVE
Written by: Paul Rubin Primary Source: OR in an OB World [T]he report of my death was an exaggeration. (Mark Twain, 1897) In a recent blog post, “Data Science Is Not Dead“, Jean-Francois Puget discussed and dissented with a post by Jeroen ter Heerdt titled “Data Science is dead.” Barring the possibility that Schroedinger shoved data science into a box and sealed it, both assertions cannot simultaneously be true. The central thesis of ter Heerdt’s post is that data scientists have developed powerful and easy to use tools, now deployed on cloud services, that let lay people do the analyses that previously required said data scientists, in effect putting themselves out of business. Puget responds that “there is a very long tail of use cases where developing a packaged service isn’t worth it”, and draws parallels to operations research. Common and important problems such as routing, machine scheduling, crew scheduling and so on have led to the development of problem-specific commercial software, but “many companies still have an OR department (under that name, or as part of their data science department) because they have operations problems that cannot be optimized with off the shelf software of services”. I’m siding with Puget on this, having experienced the “demise” of management science (if not all of OR) some decades ago. When I started on the faculty at Michigan State University, management science (whether under that name, “operations research” or something else) was a common and important element of business school programs. We had mandatory core courses in M.S. at both the undergraduate and masters levels, as well as higher level “elective” courses that were de facto requirements for some doctoral concentrations. We also participated in an interdisciplinary OR program at the masters level. Gradually (but not gradually enough for me), MS evaporated at MSU (a bit ironic given the respective acronyms). 
Some of the more applied subject matter was moved into “functional area” courses (production planning, marketing, finance); most of the more conceptual subject matter just went away. As previously noted, canned software began to be available to solve many of the problems. The perceived need shifted from someone who understood algorithms to someone who could model the problem well enough to generate the inputs for the software. As Puget notes, there is still demand for OR/MS professionals because there are new problems to be recognized and modeled, and models to be solved that do not fit neatly into the canned software. I believe there is also another reason not to start shoveling dirt on the grave of OR/MS. Users who learned a few basic incantations in a functional area class, without learning how the magic works (or does not work), may misapply techniques or produce incorrect models. Those who learn OR/MS (or analytics) as incantations may also tend to be a bit too literal-minded. A possible analogy is the difference between a chef and someone like me who equates “cook” with “microwave on high”. A chef understands a recipe as a framework, to be adjusted as needed. You couldn’t get the cut of meat called for? Switch to this meat, then adjust this spice and cook a bit longer. On the (exceedingly rare) occasions I actually prepare a dish, I follow the instructions slavishly and try not to freelance at all. Along those lines, I attended a thesis proposal defense (for a student not trained in OR/MS) where the work involved delivery routing and included a variant of a traveling salesman model. Both the candidate and his committee took it as axiomatic that a vehicle could not pass through the same node in the routing graph twice because, well, that’s part of the definition of the TSP. So I posed the following simple question. You have a warehouse W and two customers A and B, all on the same street, with W between A and B. 
A is in a cul de sac, so the network diagram looks like A — W — B — whatever with any number of links to B but only the A-W edge incident A (and only A-W and B-W incident on W). Trivial exercise: prove that, under the strict definition of a TSP, A and B cannot be on the same route, no matter how close they are to W (and each other). My point here was not to bust the student’s chops. An OR “chef” building a network model for truck deliveries would (hopefully) recognize that the arcs should represent the best (shortest, fastest, cheapest …) way to travel between any pair of nodes, and not just physical roads. So, in the above example, there should be arcs between A and B that represent going “by way of” W. It’s fine to say that you will stop at any destination exactly once, but I know of only two reasons why one would route a vehicle with a requirement that it pass through any location at most once: it’s either laying land mines or dribbling radioactive waste behind it. Hopefully neither applied in the student’s case. The student, on the other hand, could not see past “arc = physical roadway” … nor was he the only one. After the defense, the student contacted two companies that produce routing software for the trucking industry. According to him (and I’ll have to take his word for this), neither of them had given any thought to the possibility that passing through the same node twice might be optimal, or to creating “meta-arcs” that represent best available routes rather than just direct physical links. If true, it serves as a reminder that canned software is not always correct software. Caveat emptor. The flip side of being too “cookie cutter” in modeling is being too unstructured. As an instructor, I cringed at students (and authors) who thought a linear regression model could be applied anywhere, or that it was appropriate to pick any variable, even a categorical variable, as the dependent variable for a linear regression. 
To me, at least, neural networks and software services sold as “machine learning” are more opaque than regression models. Having someone with modest if any data science training cramming whatever data is at hand into one end of a statistical black box and accepting whatever comes out the other end does not bode well. So, in addition to Puget’s “tail problems”, I think there will always be value in having some people trained at the “chef” level, whether it be in OR/MS or data science, working alongside the “line cooks”.
OPCFW_CODE
'''Data Intake: Main Script which starts The Preprocessor

Options provided:
    1. Data Description
    2. Data Imputation
    3. Univariate Analysis
    4. Bivariate Analysis
    5. Data Wrangling
    6. Download Dataset
'''
import pandas as pd
import numpy as np
import sys

# Project-local processing stages; each wraps the DataFrame and returns a
# (possibly) transformed version of it.
import data_impute
import univariate
import bivariate
import download
import wrangling

# Menu labels shown to the user; the 1-based position is the option number.
functionality = [
    "Data_Description",
    "Data_Imputation",
    "Univariate Analysis",
    "Bivariate Analysis",
    "Data Wrangling",
    "Download Dataset"
]


def describe_column(df):
    """Prompt for a column name and print its pandas describe() summary.

    Column names are lower-cased on load (see change_column_lower), so the
    user's input is normalised the same way before the lookup.
    """
    print("Enter column name , which you want to describe :")
    colm = input()
    colm = colm.lower()
    print(df[colm].describe())
    input("Press Enter to continue")
    return


def description(df):
    """Interactive data-description menu: numeric, object, or single column."""
    desc_type = 0
    print(df.info())
    while (desc_type != -1):
        print("""Data description :\n 1.Numeric Columns Description\n 2.Object Column Description\n 3.Description of a particular Column""")
        try:
            desc_type = int(input("Enter -1 to exit this\n"))
        except ValueError:
            # Non-numeric input: report and re-show the menu instead of crashing.
            print("wrong_input")
            continue
        if desc_type == 1:
            print(df.describe(include=[np.number]))
        elif desc_type == 2:
            # Use the 'object' dtype string: the np.object alias was
            # deprecated in NumPy 1.20 and removed in NumPy 1.24.
            print(df.describe(include=['object']))
        elif desc_type == 3:
            describe_column(df)
        elif desc_type == -1:
            break
        else:
            print("wrong_input")
    return


def find_target(df):
    """List the available columns and ask the user to pick the target one.

    Returns the chosen name lower-cased, matching the normalised columns.
    """
    print("List of column names")
    colm_names = list(df.columns.values)
    for colm in colm_names:
        print(colm)
    target = input("Enter the target value ")
    target = target.lower()
    return target


def change_column_lower(df):
    """Rename every column of df to its lower-case form (in place); returns df."""
    colm_dict = {}
    colm_names = list(df.columns.values)
    for colm in colm_names:
        colm_dict[colm] = colm.lower()
    df.rename(columns=colm_dict, inplace=True)
    return df


def preprocessor():
    """Entry point: load a CSV, pick a target column, then loop over the menu."""
    # Dataset path comes from argv[1] when given, otherwise prompt for it.
    if len(sys.argv) <= 1:
        print("\nEnter dataset address")
        df_input = input()
    else:
        df_input = sys.argv[1]
    # Reading the dataset
    df = pd.read_csv(df_input)
    print(" -> Data taken from " + df_input)
    df = change_column_lower(df)
    # Deciding the target variable.
    # NOTE(review): x and y are split out here but never used by the menu
    # below; kept for parity with the original flow — confirm before removing.
    target_colm = find_target(df)
    y = df[target_colm]
    x = df.drop(target_colm, axis=1)
    # Menu loop:
    while (1):
        print("\nWhat do you want")
        for count, name in enumerate(functionality, start=1):
            print(str(count) + ". " + name)
        try:
            inp = int(input("\nEnter your input: ,-1 to exit\n"))
        except ValueError:
            print("wrong_input")
            continue
        if inp == -1:
            # sys.exit() rather than the site-module exit() helper, which is
            # not guaranteed to be available in every interpreter.
            sys.exit()
        elif inp == 1:
            print(" --> Data Description:")
            description(df)
        elif inp == 2:
            print(" --> Data imputation:")
            imp_obj = data_impute.Imputer(df)
            df = imp_obj.impute()
        elif inp == 3:
            print(" --> Univariate Analysis:")
            uni_obj = univariate.Univariate(df)
            df = uni_obj.univariate_plot()
        elif inp == 4:
            print(" --> Bivariate Analysis:")
            bi_obj = bivariate.Bivariate(df)
            df = bi_obj.bivariate_plot()
        elif inp == 5:
            print(" --> Data Wrangling")
            wrangle_obj = wrangling.Wrangle(df)
            df = wrangle_obj.wrangle()
        elif inp == 6:
            print(" --> Download Dataset")
            down_obj = download.Download(df)
            down_obj.make_dataset()
        else:
            print("wrong_input")
    return


if __name__ == "__main__":
    preprocessor()
STACK_EDU
"""
Mask images to remove unwanted pixels.

This program is used to generate mask to specify which pixels should be
considered "invalid" during spot finding and integration. It provides a few
options to create simple masks using the detector trusted range, or from
simple shapes or by setting different resolution ranges.

Examples::

  dials.generate_mask models.expt border=5

  dials.generate_mask models.expt \\
    untrusted.rectangle=50,100,50,100 \\
    untrusted.circle=200,200,100

  dials.generate_mask models.expt d_max=2.00
"""

from __future__ import annotations

import logging
import os.path
import pickle
from typing import List, Optional, Tuple

import libtbx.phil as phil

from dxtbx.format.image import ImageBool
from dxtbx.model.experiment_list import ExperimentList
from scitbx.array_family import flex

import dials.util
import dials.util.log
import dials.util.masking
from dials.util.options import ArgumentParser, flatten_experiments

# One tuple of per-panel boolean masks for each imageset.
Masks = List[Tuple[flex.bool, ...]]

log = logging.getLogger("dials.generate_mask")

# Command-line parameter definitions; the masking options themselves
# (border, untrusted regions, resolution limits, ...) are pulled in from
# dials.util.masking via the 'include scope' directive.
phil_scope = phil.parse(
    """
    output {
        mask = pixels.mask
            .type = path
            .help = "Name of output mask file."

        experiments = None
            .type = path
            .help = "Name of output experiment list file. If this is set, a copy of "
                    "the experiments, modified with the generated pixel masks, "
                    "will be saved to this location."

        log = 'dials.generate_mask.log'
            .type = str
            .help = "The log filename."
    }

    include scope dials.util.masking.phil_scope
    """,
    process_includes=True,
)


def generate_mask(
    experiments: ExperimentList,
    params: phil.scope_extract,
) -> Tuple[Masks, Optional[ExperimentList]]:
    """
    Generate a pixel mask for each imageset in an experiment list.

    Use the masking parameters :param:`params` and the experiments in the
    experiment list :param:`experiments` to define a pixel mask for each of
    the associated imagesets. The masks are generated using
    :mod:`dials.util.masking`.

    The masks will be saved to disk at the location specified by
    :attr:`params.output.mask`. If the experiment list contains more than one
    imageset, multiple mask files will be produced, with filenames
    differentiated by an appended number. Optionally, if a path
    :attr:`params.output.experiments` is set, a modified copy of
    :param:`experiments` with the masks applied will be saved to that
    location.

    Args:
        experiments: An experiment list containing only one imageset.
        params: Masking parameters, having the structure defined in
            :data:`phil_scope`.

    Returns:
        A list of masks, one for each imageset.

        A copy of :param:`experiments` with the masks applied (optional,
        only returned if :attr:`params.output.experiments` is set).
    """
    imagesets = experiments.imagesets()
    masks = []

    # Create output mask filenames
    num_imagesets = len(imagesets)
    if num_imagesets == 1:
        filenames = [params.output.mask]
    else:
        # If there is more than one imageset, append a number to each output
        # filename, zero-padded so the files sort in imageset order.
        name, ext = os.path.splitext(params.output.mask)
        pad = len(str(num_imagesets))
        filenames = [
            "{name}_{num:0{pad}}{ext}".format(name=name, num=i + 1, pad=pad, ext=ext)
            for i in range(num_imagesets)
        ]

    for imageset, filename in zip(imagesets, filenames):
        mask = dials.util.masking.generate_mask(imageset, params)
        masks.append(mask)

        # Save the mask to file
        log.info("Writing mask to %s", filename)
        with open(filename, "wb") as fh:
            pickle.dump(mask, fh)

        if params.output.experiments:
            # Apply the mask to the imageset so the saved experiment list
            # references both the in-memory mask and the file just written.
            imageset.external_lookup.mask.data = ImageBool(mask)
            imageset.external_lookup.mask.filename = filename

    if params.output.experiments:
        # Save the experiment list
        log.info("Saving experiments to %s", params.output.experiments)
        experiments.as_file(params.output.experiments)
    else:
        experiments = None

    return masks, experiments


@dials.util.show_mail_handle_errors()
def run(args: Optional[List[str]] = None, phil: phil.scope = phil_scope) -> None:
    """
    Parse command-line arguments, run the script.

    Uses the DIALS option parser to extract an experiment list and
    parameters, then passes these to :func:`generate_mask`.

    Args:
        phil: PHIL scope for option parser.
        args: Arguments to parse. If None, :data:`sys.argv[1:]` will be used.
    """
    # Create the parser
    usage = "usage: dials.generate_mask [options] models.expt"
    parser = ArgumentParser(
        usage=usage,
        phil=phil,
        epilog=__doc__,
        read_experiments=True,
        read_experiments_from_images=True,
    )

    # Parse the command line arguments
    params, options = parser.parse_args(args=args, show_diff_phil=True)
    experiments = flatten_experiments(params.input.experiments)

    # Configure logging
    dials.util.log.config(verbosity=options.verbose, logfile=params.output.log)

    # Check number of args; with no experiments there is nothing to mask.
    if len(experiments) == 0:
        parser.print_help()
        return

    # Run the script
    generate_mask(experiments, params)


if __name__ == "__main__":
    run()
STACK_EDU
7.6. Wrap Content with Floating Elements HTML normally flows from the top of the browser window down to the bottom, one headline, paragraph, or block-level element on top of another. This word-processor-like display is visually boring (Figure 7-12, top), but with CSS, you're far from stuck with it. You'll learn lots of new techniques for arranging items on a Web page in Part 3, but you can spice up your pages plenty with one little CSS property: float. The float property moves an element to either the left or right. In the process, content below the floated element moves up and wraps around the float (Figure 7-12, bottom). Floating elements are ideal for moving supplemental information out of the way of the main text of a page. Images can move to either edge, letting text wrap elegantly around them. Similarly, you can shuttle a sidebar of information and links off to one side. Figure 7-12. The regular flow of HTML is left to right, top to bottom, with one block-level element—headline, paragraph, <div>, and so on—stacked on top of the next. By letting you break up this flow, the float property is one of the most powerful and useful tools that CSS offers. Its uses range from simply moving an image to one side of a paragraph to providing complete layout control over banners, sidebars, navigation bars, and other page elements. While you can use floats in some complex (and confusing) ways, as you'll see in Chapter 11, the basic property is very simple. It takes one of three keywords—left, right, or none—like so: float: left;. left. Slides the styled element to the left, while content below wraps around the right side of the element. right. Slides the element to the right. none. Turns off the float and returns the object to its normal position. Floating an image is similar to setting the <img> tag's align attribute to either left or right. That little bit of HTML is deprecated (Section 3.3), so use the CSS float property instead. Floated elements move to the left or right edge of the browser window, or of their containing element, if there is one. 
You may have a box on the page that's 300 pixels wide and is itself floated to the right edge of the browser window. Inside that box, you've got an image that floats to the left. That image slides to the left edge of that 300 pixel-wide boxnot the left edge of the browser window. You can even use the float property with an inline element (see Section 7.2.4) such as the <img> tag. In fact, floating a photo to the left or right using CSS is a very common use of the float property. A Web browser treats a floated inline element just like a block-level element, so you don't run into the problems with padding and margin that normally trouble inline elements (see Section 7.2.4). You can also float a block-level element like a headline or paragraph. A common technique's to float a <div> tag containing other HTML tags and page content to create a kind of containing box. In this way, you can create sidebars, pull quotes, and other self-contained page elements. (You'll see an example of this in this chapter's tutorial.) When you float block-level elements, it's a good idea to set the width property as well. This way, you can control how much horizontal space the block takes up and how much space is available for the content below it to move up and wrap around the block. the order in which you write your HTMLhas a big impact on the display of floated elements. The HTML for the floated tag must appear the HTML of any content that wraps around the floated element. Say you've created a Web page of an <h1> tag, followed by a <p> tag. Toward the end of that <p> tag, you've also inserted a photo using the <img> tag. If you float that photo to the right, say, then the <h1> tag and most of the content inside that <p> tag will still appear above the photo; only content that the <img> tag will wrap around the left side of the image. 7.6.1. Backgrounds, Borders, and Floats To the consternation of many Web designers, backgrounds and borders don't float the same way as other page elements. 
Say you float an elementa sidebar for exampleto the right. The content below the sidebar moves up and wraps around it, just as it should. But if that content has a background or border set on it, then that background or border actually appears the floated sidebar (Figure 7-13, left). In essence, a Web browser wraps the text around the float, but not the border or background. Believe it or not, this is kosher, and how (according to the rules) it's supposed to work. Of course, you may not want to follow these rules; you might want to have the border or background stop when it the floated element (Figure 7-13, right). With a little CSS magic, you can do it. First, you need to add one rule to the style that has background or borders running underneath the float. Once you locate the style, add this line: ;. The overflow property (discussed in more detail in Section 7.5.2) makes any background or border that extends underneath the float disappear. Another approach is to add a borderline around the floated element; when you make the borderline thick enough and match its color to the background color of the page, the border looks just like empty spaceeven though it's covering and hiding the background and borderlines that are extending below it. Figure 7-13. In this example, there's an <h1> tag with a background color and an <h2> tag with a border (left). Adding overflow: hidden; to the style for the <h1> tag (right) the headline from appearing under the floating element (sidebar). 7.6.2. Stopping the Float Sometimes you need a way to tell a tag to ignore a floated element. You may have a copyright notice that should always appear at the bottom of the browser window. If you have a particularly tall sidebar that's floated to the left side of the page, the copyright notice might actually be drawn up the page and wrap around the float. Instead of appearing at the bottom of the page, the copyright is sitting up the page next to the sidebar. 
You want the copyright notice part of your page to refuse to wrap around the floated element and instead drop below it. Other problems occur when you have several floated items close together. If the floated items aren't very wide, they float up and next to each other, and if they're of varying heights they can get into an unattractive logjam (see Figure 7-14, top). In this case, you don't want the floated elements to float next to each other. CSS provides the clear property for just these types of problems. When overflow: hidden Fails The overflow: hidden property prevents backgrounds and borders from awkwardly running under floating elements (Figure 7-13). But nothing's ever that simple in the world of Web browsers. While this one line of code works for Internet Explorer 7, Firefox, Camino, and Safari, it doesn't work reliably in Opera (at least version 8.5), and Internet Explorer 5 and 6 for Windows just ignore it. Alas there's no apparent fix for Opera, but there's something you can do for IE 5 and 6. For those browsers, you need to add one additional rule: zoom: 1; This is a Microsoft-only property that lets you enlarge an element on a page. In this case, though, it's just a weird way to force IE 5 and 6 to stop a border or background from extending underneath the floated element. (For more detail on why this zoom thing works, see "Got Layout?" in Section 11.1.) You may want to put the IE-specific zoom rule in an IE-only style (see the box in Section 7.6). You can even put it in a completely separate IE-only external style sheet (Section 14.5.2). You'll also find an example of this problem and its solution in the tutorial starting in Section 7.7.5. The clear property instructs an element to not wrap around a floated item. By clearing an element, you force it to drop down below the floated item. Also, you can control which type of float (left or right) is cleared or force a style to simply ignore both types of floats. The clear property accepts the following options: 
left. The style will drop below elements that are floated left but will still wrap around right-floated objects. right. Forces a drop below right-floated objects but still wraps around left-floated objects. both. Forces a drop below both left- and right-floated elements. none. Turns off clearing altogether. In other words, it makes an item wrap around both left- and right-floated objects, which is how Web browsers normally work. In the case of a copyright notice that must appear at the bottom of the page, you'd want it to clear both left- and right-floated objects—it should always be below other content and should never wrap to the left or right of any other item. Here's a class style that would do just that: .copyright { clear: both; } Figure 7-14 shows how the clear property can prevent floated items of varying heights from jamming together. All three photos in that figure have a right float applied to them. In the top figure, the photo of the tomatoes (1) is the first image on the page and appears at the far right edge. The second image (2) obeys the float set on the first image and wraps up to the left of it. The last photo (3) isn't wide enough to sit next to the second photo (2) but still attempts to wrap around both (1) and (2). It gets stuck in the process. Setting clear: right; on the images prevents the photos from sitting next to each other (Figure 7-14, bottom). The clear applied to the second photo prevents it from wrapping up next to the first image, while the last image's right clear property forces it to appear below the second image. Figure 7-14. Top: Sometimes you don't want an element to wrap around a floated object. Bottom: Applying the clear property (in this case clear: right;) to each image prevents them from sitting next to each other. The clear applied to photo (2) prevents it from wrapping up next to image (1). Applying clear: right to photo (3) forces it to appear below photo (2). This business of left floats, right floats, and how to clear them sounds complicated—and it is. This section gives you a basic introduction. 
You'll see the subject again in Chapter 11 and eventually learn how to use floats in more sophisticated ways.
OPCFW_CODE
On Aug 29, 2005, at 5:30 AM, Finn Thain wrote: > On Mon, 29 Aug 2005, Grobian wrote: >> Kito wrote: >>> On Aug 27, 2005, at 4:32 PM, Grobian wrote: > At the moment, ppc-macos would need the collision-protect feature, but > once we have a feature for prefixed installs, that should be used > (by default, at least). >>> The user interface would need to be hashed out as well of course. >>> do you install/bootstrap it? > It would be nice to have ebuilds that could invoke the Darwin Build > Scripts (and merge the result on a ROOT). Yeap. already planned on using this to build a libSystem, etc. > Given such ebuilds, surely catalyst can bootstrap it. >>> Where is the local configuration data stored? This is an area that I >>> think would be acceptable to take some Mac OS specific indulgences, >>> such as plists for the main config data(prefix info, search paths, >>> etc), pkgs/dmgs to bootstrap/install, and I also think we should >>> the umbrella Framework mechanism when feasible. >> Wow, using plists would be a first start on getting portage widely >> accepted because it includes the buzz word XML. LOL. On a serious >> note, I think Apple has shown XML can work somehow. At least it >> has an >> open character, and it's great when you can 'script' your Keynote >> presentation by just doing string manipulation in a big XML file. >> So I would say, let's try to use this horrible XML on a pilot base >> small configuration files. Maybe we should do it better than >> Apple at >> some point because their XML does not always make use of the tree >> structure of XML. For XML I would say: only deal with it if it looks >> appropriate for the case and it is relatively easy to extract the >> (which is often very flat, as in the .plist files). > I reckon XML is important, though perhaps not in the way you > describe. As > I see it, where ever portage is deployed as a secondary package > it needs to consult the primary one. 
That means that there needs to > be a > standard protocol for one package manager to query another. I'm not sure I agree. I think this gets too close to a package.provided situation, portage will never know enough about another systems packages to map their functionality to its own. I think its preferable to let portage handle what it knows first hand before trying to force it data from a foreign host. >> Let's indeed make it a 'native' application for OSX users. Native in >> the sense of how it installs and looks like. I may give file >> locations a >> thought today, but maybe I should know the Framework stuff a bit >> first. Can we install the whole Gentoo stuff in a Framework? or >> is it >> better to just use a shortest path algorithm and end with /gentoo? >> Actually the user should be able to select a disk to install to > I reckon get it working (with an "upstream darwin" profile) in a > stage first (which could double as a boot disk). I want to start a lot smaller than that first. Think stage1. You can't boot a stage1, its a just the corelibs and a toolchain pretty >>> The repo should never ever never ever EVER rely on anything it >>> doesn't know how to supply itself with, whether that be a tool, a >>> library, knowledge of a filesystem, a host, a protocol, whatever. It >>> doesn't matter how it gets it, it just needs to know at least one >>> to get it. This implies of course proper implementation of ferringbs >>> BDEPENDS idea. >> so, you mean if there is something (a filesystem) portage hasn't >> installed, then we should create the proper handles to deal with >> the OS >> provided one? As in create a compatibility tool for "fdisk.HFS >> +". I'm >> a bit clueless on how exactly you want to achieve what you want. >> I don't understand correctly what you want exactly too. 
> This is how I would handle that case: > If a program (say fsck_hfs) is available upstream, build it with Dawin > Build, merge it with portage (I expect an eclass for darwin build is > required, and of course an ebuild for diskdev_cmds.) About what I was thinking... >>> Binary packages have to work. Thats a fun one. But all this done >>> properly, should allow for at least a little more consistency >>> than the >>> current situation. I'm still sold on using xar for this >>> despite the >>> rather heavy deps (they are deps I would want in most any >>> anyway - libcurl,ssl,libxml,zlib), and it just fits the bill too >>> imho, support for most standard arch specific data such bsd flags, >>> ACLs, resource forks ,.etc as well an excellent TOC structure >>> that is >>> perfect for storing environment settings and package metadata. >> Again XML. If you do it XML, you have to do it all XML, something >> apparently understood. It appears we will have the blessings if >> we use >> XML, so I think we should. In the end we can always dump all that >> into MonetDB/XQuery to have extremely fast querying over all the >> tree based. I think it would seriously be the first project to use >> XQuery and XML for it's configuration. However, if you come to >> think of >> it, it is a tree, an extensible tree, and might be a much more a >> choice than it appears to be. > Why not use one of the open source .pkg tools to generate binary > Kito tells me he has already been able to unpack .pkg format and > it via portage. Thats just to get extract files...I'm talking about binary packages that portage can use. I don't like the current tbz2 hack. I have no interest personally in producing packages with a proprietary format for portage. Be a nice feature for OS X users and devs sure, but thats more like an add-on bells-and-whistles type feature... 
patches accepted ;) >>> Avoid package.provided or anything of its likeness like the >>> This repo needs to describe a toolchain from the ground up, >>> of the host. "What CAN it build and how?", not "What WON'T the >>> host OS >>> let me do". >> Uhm, yes please! > Hear, hear! >>> Keep the number of ebuilds to a bare minimum. We can't get too >>> carried away with maintaining a complete tree, or we risk >>> drifting too >>> far downstream and the zealots pushing Darwin/Mac OS support out of >>> the main tree entirely. That would be bad. This can't go in the >>> direction of a fork, just an experimental branch that will be merged >>> back in at some point. > IMHO, this sounds like a "gentoo-darwin" sub-project to gentoo-alt, > along-side os x and bsd. It isn't really a fork except in as much > as the > profile arrangement doesn't really accomodate work on darwin proper > then the profile arrangemnet is flawed anyway: it only exists this way > because of the package.provided crutch). I was looking at it more as a place to develop some new portage features...Gentoo/Darwin has always been lurking, this is more in the area of just getting offsets working. > firstname.lastname@example.org mailing list email@example.com mailing list
OPCFW_CODE
New installation Jaunty constant permissions problems John L Vifian jongleur at liripipe.com Tue May 19 17:12:20 UTC 2009 On Tuesday 19 May 2009 9:25:50 am steven vollom wrote: > > Never use "sudo dolphin", use "kdesudo dolphin" instead. > Does this apply for all applications that are viewed and used while in the > GUI and not just graphical applications like gimp, digikam, etc. Or are > all the office applications, browser, email, etc. considered graphical too. NO! You should only rarely need to use sudo or kdesudo in the first place. Most programs that you run are designed to be run as a normal user, in your case as "steven" and running them as the superuser (root) will cause problems. Use the regular Kmenu to start your applications. > > If you want to change all your config files and all the other stuff in > > your home directory to be owned by you again, you can run the command > Nils, a friend who prefers anonymity wrote off-list that my problem was > when I set the mount point for my /home partition as /home/steven, I > created the permissions problem. If that is the case, would it be better > and less likely create future problems, if I reinstalled and set the 'home' > partition as /home instead of /home/steven? > > sudo chown -R $USER: $HOME This should fix your permissions problem, however it doesn't tell us anything about why you are having the problem or what the scope of the problem is. Changing where you mount a partition shouldn't have anything to do with permissions, although mounting it like your friend suggests might solve other > I really need to learn this stuff, Nils, if I write it down, I may never > remember that it is recorded or where I put it. Still, I definitely will > write it down. I will make a file called Ownership/Permissions because > both those words may trigger the memory. Thanks friend. 
Steven one thing you might try is understanding what you are doing and why rather than approaching this as magic bits of code you paste into a terminal that fixes things. In this case you probably already know that sudo runs the command following it as the superuser, so what does chown do and why would you want to do it? What is the effect of the -R that follows it? What is this $USER thing? Why does it have a colon after it? And what is $HOME. You can find out and understand what you are doing by typing "info chown" into your terminal.. Use PageUp and pageDown to navigate and "q" to quit. You could also use man <command> instead of info, or just google chown or whatever command you want to understand. You should really do this before you run a command. People are trying to help you here, but it is easy not to fully understand your problem and to suggest solutions that will make matters worse. To help you a bit $HOME & $USER are environment variables and hold the path to the home directory, and the user's ID. you can see what they hold by typing echo $HOME and echo $USER into your terminal . Now what does echo do? If a woman has to choose between catching a fly ball and saving an infant's life, she will choose to save the infant's life without even considering if there are men on base. More information about the kubuntu-users
OPCFW_CODE
ECCOMAS Multibody Dynamics Conference 2019 July 15-18, 2019, Duisburg, Germany Freshest Pre-Conference Information Information for Speakers and Session Chairs Please prepare your talk such that you have 15 minutes for presentation and 5 minutes for discussion. The total slot for presentation and discussion is 20 minutes sharp. Please abide to this requirement not as an authoritative prescription but as a means of allowing attendants to commute between the (four) parallel sessions in a predictive manner. Speakers can bring their own laptop or handheld device with one of the possible connections VGA or HDMI. Also, there will be a computer in the lecture room where speakers can upload their presentation supporting Windows 10, MS Office 2016, Adobe pdf Reader DC, VLC-Player, Windows Media Player, Internet Explorer, and Firefox. There will be an assisting person in the lecture room 30 minutes prior to each session start where speakers are kindly asked to present themselves before the session starts to test the own device connection or the upload and working of the own file on the lecture-room computer and check the box of attendance for the session chair to know. On morning sessions directly after keynotes, the 30 minutes will be split in 20 minutes before the keynote and 10 minutes before the session. Speakers are also asked to kindly show-up 5 minutes prior to the session start in order to introduce themselves to the Session Chair. Session Chairs are kindly asked to abide to the talk schedule sharp. This means that (a) no talk should exceed the 20-minutes slot for both presentation and discussion, and (b) in case there are no-shows, the slot is left empty (letting attendants go for a coffee). This is kindly asked to be handled strictly. Session Chairs are kindly asked to show-up in the lecture room 5 minutes before session start to be able to identify speakers before session starts. 
There will be only one Session Chair per session, thus in case of inconvenience please contact the conference desk for organization of a substitute. There will be an instruction sheet and a list of papers and announced speakers with affiliation for each session in the session room. Information for Participants in General As the conference is within the university premises, internet access is available at the conference via eduroam. If you already have a working eduroam account, you can use it. For the case you do not have a working eduroam account, we will be providing you with a user name and a password in your conference kit and will have eduroam Wi-Fi configuration instructions for a selection of devices (Android smart phone, iPhone/iPad, Windows laptop, MacBook) at the conference desk. For your own convenience, please install the following certificate https://www.pki.dfn.de/fileadmin/PKI/zertifikate/T-TeleSec_GlobalRoot_Class_2.crt before coming to the conference, as it is necessary in order to use eduroam, and you might not be able to access it when arriving in Duisburg. We are happy to announce that during conference, we will offer free lunch (with daily varying local and German specialties) for all participants. Lunches will be served in the “Multi-Body Tent”, which has been specially erected (not built) for the conference just across the building where the conference rooms are located. You will find the location on the map and the multi bodies inside the tent. We will be serving in the lunches and in the banquet buffet choice with English tags describing each dish, and there will be a rich choice from vegan to nagev. Social events (ship cruise & evening reception event in Düsseldorf on Monday July 15, sightseeing tour & Banquet on Wednesday July 17) are all planned for all conference attendants. 
On-site registration/fee payment If you were not able to carry out the payment of your conference fees, or if you or your relatives or friends decide to participate spontaneously after seeing the social program, there will be a cash desk at the registration counter where you can pay. Please bring counted cash in Euros in the amount specified for late registration with you. Regretfully we will not be able to offer credit-card-payment at the registration counter, due to disproportionally hereto required administrative effort with respect to the number of expected desk payments. If you have already sent the bank transfer or credit card payment, please bring a printed payment slip with you.
OPCFW_CODE
M: How four underdogs took on M.I.T. in underwater bot championship (2005) - curtis http://archive.wired.com/wired/archive/13.04/robot_pr.html R: jbrun And then all of them could not go to college due to immigration status and lack of money. They do manual labor now. Great American Underdog Story. R: munin even when they raised $90k of money from donations after their story was in Wired, two of them went but both dropped out. I think they work at a catering company now? it is a very american story. and of course, most likely the team from MIT they beat are all now working at high tech companies or in graduate schools. R: spiritplumber "The point of the game is not to win the game. The point of the game is to take the prize home." Reminds me of a mini-competition that happened between some San Antonio unis about who got to do a NASA internship. The girl who got 3rd place went because the winner and runner-up were not American citizens (I was the runner-up). We ended up collaborating on the design remotely. In later life, I had at least one person tell me that I couldn't put that on a resume. This person was not hurt. R: gohrt NYT covers these engineers and their immigration status last month: [http://www.nytimes.com/2015/01/16/opinion/the-cruel-waste- of...](http://www.nytimes.com/2015/01/16/opinion/the-cruel-waste-of-americas- tech-talent.html) For Wired: > "What will you say," [Ira Mehlman] asks, "to an American kid who does not > get into a state university and whose family cannot afford a private college > because that seat and that subsidy have been given to someone who is in the > country illegally?" I would say, "We need to restore state college funding to the level it was when Ira Mehlman went to college; to make practical advanced education accessible to all, because it's the best investment we can make in our nation" R: pthreads Hasn't this been read and read over and over again? 
R: thetmkay A film has just been released in select US cinemas based on their story Spare Parts (2015) - [http://www.imdb.com/title/tt3233418/](http://www.imdb.com/title/tt3233418/) R: 6stringmerc Yes, I have seen the film and it is quite good. The story itself and the cinematic execution are definitely above par, I'd give it 4 out of 5 stars for entertainment and quality. Also, I'm reluctant to call it an underdog story. It's more a "coming of age" story, which is a different thing. There are many, many emotional aspects to the film (family dynamics - multiple generations in a home, absent father or a deported mother) which serve to enhance the overall themes of what happened. Lastly, it's one of the few films I've seen recently that doesn't rely on CGI to show off important parts of the story. As the credits indicated, there were teams of robotics professionals who built the units used in the film. I recommend the film and will buy it to have in my house to put on in the background when the mood strikes me. Others that fit that bill for me are _Rush_ , _Shawshank Redemption_ , _The Crow_ , and _The Big Lebowski_. YMMV. R: julianj I don't know why, but I kept reading this as underwear bot championship...
HACKER_NEWS
Multiple audio streams don't seem to work as expected Hi @AlexxIT, I've been working on replacing rtsp-simple-server with go2rtc in my project and, overall, it is working really well. My project uses RTSP and dynmically starts streams from Ring cameras using an external script so I am using the exec source which starts a publisher that sends data using FFmpeg as an RTSP publisher. This works just fine, however, there's one issue I haven't been able to figure out. For maximum compatibility I have ffmpeg configured to provide two audio channels, one encoded as AAC and another encoded as Opus (the video is H264). This way clients that requires AAC (such as HLS in Home Assistant) can use the AAC stream, while clients that need Opus, such as WebRTC, can use the Opus stream. With rtsp-simple-server this works perfectly. If I open the RTSP stream in a tool like VLC, I can see both the AAC and Opus channels listed in the codecs section. However, with go2rtc, it seems that, even though I am publishing two channels of audio as part of the stream, only the first one is forwarded to the client. So, for example, if I make AAC the first one, the WebRTC in Home Assistant will have no audio because it picks the AAC audio instead of the available Opus track. If I open the RTSP stream in VLC, it will only show whichever audio channel I place first. I thought perhaps this is a configuration issue on my side, I really just want go2rtc to copy and make both audio streams available to the client, just like rtsp-simple-server does, but I can't figure out any way to do that with the exec source. I assumed that the exec source would simply forward all channels that are being sent. I need to go back and test -rc5, I somehow feel like the problem didn't exist then, because I only just noticed it, but I've only just started really testing all the various scenarios that users might have so perhaps I missed the behavior previously. 
By default go2rtc outputs only 1 video and only 1 audio to RTSP. You can choose which codec you want with the filters: rtsp://<IP_ADDRESS>:8554/camera1?video&audio=aac. The problem is that with an RTSP stream, you don't know which codec the client can play. With WebRTC or MSE you know which codec the client can play. Autoselect should work there. Of course I can output all the tracks in RTSP. I just don't know if that would be the right thing to do. The problem is that with an RTSP stream, you don't know which codec the client can play. Exactly, which is why I provide both tracks and let the client choose. In my case it's even the same RTSP client because in HA there's only a single RTSP URL, but they may use HLS or WebRTC, and you need a different codec based on which one is used. Of course I can output all the tracks in RTSP. I just don't know if that would be the right thing to do. I'm not 100% sure if it's the right thing to do either, as I'm certainly not an expert here, but I've used a number of RTSP servers over the years and they've all always offered the client all tracks available from the publisher. Based on my limited understanding of the RTSP protocol, it basically uses SDP to describe the available streams to the client, so I think that it is generally expected that all streams are offered, and the client chooses which streams it would like to decode based on what it supports and its preference. It sounds like I could get similar behavior with go2rtc, but only if I have users change their RTSP URL to explicitly request both audio tracks. I'll play with this, but it definitely makes the transition more difficult. Thanks for your consideration. Problem will be with multisource streams. Because go2rtc will start all of them before RTSP response. I'll think about this concept Ah, makes sense. Maybe a server-side configuration option that enables this behavior or perhaps enable only if there is a single source. I'll live with whatever choice you make. 
Thanks again for this great project, I can see a ton of potential here. rtsp://<IP_ADDRESS>:8554/camera1?video&audio=aac&audio=opus I did test this and it works just fine, so I guess worst case I can tell users that have audio issues to update the RTSP URL, not the end of the world. However, I wonder if perhaps you could allow defining a default set of query parameters for each stream to override the default behavior. I took a quick glance at the code and, while I'm far from a go expert, it looks like it would be pretty easy to implement and wouldn't change any existing default behavior. I think new default_query option may be useful for you https://github.com/AlexxIT/go2rtc#module-rtsp Thanks a lot @AlexxIT!
GITHUB_ARCHIVE
|Note: This tutorial assumes that you have completed the previous tutorials: ROS tutorials, Connecting to an IEEE 1394 Camera, Setting Dynamic Parameters.| |Please ask about problems and questions regarding this tutorial on answers.ros.org. Don't forget to include in your question the link to this page, the versions of your OS & ROS, and also add appropriate tags.| How to Configure IEEE 1394 Camera FeaturesDescription: This tutorial explains how to set IIDC feature parameters for an IEEE 1394 digital camera. Tutorial Level: BEGINNER Next Tutorial: Camera Calibration If the camera produces color images, show the color output from image_proc: $ rosrun image_view image_view image:=camera/image_color For a monochrome camera, show the image_mono topic, instead: $ rosrun image_view image_view image:=camera/image_mono The IIDC specification lists a number of optional features that may be provided by conforming camera implementations. These are currently supported by the driver as parameters: While the standard mentions the names of these features, it does not define what they do. Consult the technical specifications of the camera: manufacturers have great latitude in determining which features to support, what values they can take, and the meanings of those values. Feature Control States For each supported feature there is a corresponding auto_ parameter (auto_brightness, auto_exposure, etc.) providing user control. Different cameras handle these controls in various ways. Most do not support them all. 
The possible control states are: Off (0): turns the feature off Query (1): returns the current mode and (if possible) the value of the feature, with no change to the device Auto (2): the camera sets the value continuously Manual (3): sets a specific value from the corresponding parameter OnePush (4): the camera sets the value once, then holds it constant None (5): the camera does not provide this feature The Query control is not an actual camera mode, but rather a command to the driver that returns the current camera mode and possibly its value (if available). This is the default initial setting of all these control parameters, which causes all the current camera settings to appear in the GUI. Many cameras provide an Auto mode for some features, adjusting them to reasonable settings. It's worthwhile to try setting the control state to Auto. If that feature does not support Auto mode, its control state will quickly revert to the previous setting. If the Auto settings are not to your liking, Query the current value, then change the mode to Manual. If the feature has no Manual mode, that update may fail, but it should work with most devices. If Manual mode is available, try different values using the slider, or by typing a number into the value box followed by the Enter key. Camera features advertise minimum and maximum value limits. The driver is not currently able to expose those limits directly in the GUI. But, values set via Manual mode will be clipped to fall in the allowed range, the modified value being updated in the appropriate GUI entry. For some computer vision applications it is confusing when the camera changes its settings during operation. A few cameras provide features with a OnePush mode to address this problem. OnePush causes the camera to adjust the feature automatically, then lock in the value selected. This differs from Auto, which will continue to make adjustments if lighting conditions change. 
Although many cameras do not provide OnePush, it is often possible to perform the same task manually. That only works if the camera provides both Auto and Manual modes for the feature in question, and can return the correct value for a Query command: Wait until the picture stops changing, then Query the value Select Manual to lock in that value Note that Auto mode generally changes settings very gradually. The feature value may continue to change slightly even after the picture appears stable. If that matters for your application, repeat the Query and wait for the value to stop changing before switching to Manual. Using the Parameter Server All these parameters can be configured via the normal ROS parameter server mechanism. Both the driver and the dynamic reconfigure GUI get their initial values from the parameter server, and update it whenever a value changes. For IIDC features, don't forget to set the control state to Manual (3), otherwise the corresponding value will be ignored unless the device is already in the Manual state when the driver starts. To set brightness to 256 on the command line: $ rosrun camera1394 camera1394_node _auto_brightness:=3 _brightness:=256.0 To set the same value in a launch file:
OPCFW_CODE
Question about how magnets work So I was watching this vid- https://www.youtube.com/watch?v=7nhmJPMi4FU&t=1m0s Please watch from 1:00 to 2:00 Now I have some questions- 1) He says that the north pole of the magnetic field will be on the top of the electron if the electron spins in the clockwise direction. But how can we know what the top of the electron is? I mean, how do we figure out its top if its a ball shaped particle?(actually I know that its a wave function and yada yada ya) 2) Can anyone please explain the paired electrons thing? I don't really get it. Also, can an atom have more than 1 paired electrons? About your first question: Its basically the right hand thumb rule. Consider any spinning ball with one point marked as 'top of the ball', suppose you are looking at it from the top and it is spinning anticlockwise. Then the magnetic field will be towards the top. What if you chose the other point as 'top'? Then when you look at it, it will spin clockwise and the magnetic moment will be towards the bottom (which was earlier the top). So no matter which way you look at it, the magnetic field is in the same direction. Another way to state it is that curl the fingers of your right hand in the direction of rotation and the thumb will point in the direction of the magnetic field. Note: we can apply the same classical theory to an electron and say its spinning. But according to quantum mechanics, it has 'spin angular momentum' But it isnt actually spinning. About your second question, You will need to study basic concepts of chemistry to understand what are orbitals and how electrons are paired. Look up "Hund's Rule" on the internet. edit: while writing the answer, i forgot that an elecron is negative in charge and so the field will actually be in the opposite direction which the right hand rule gives. Further explanation: It has been experimentally observed that all magnetic fields are produced by currents*. 
Straight currents produce circular magnetic fields (see diagram below) and circular currents produce straight magnetic fields (see diagram below). The direction of magnetic field around a straight current is given by one of the right hand thumb rules "Point thumb towards current; curl your fingers. The fingers now give the direction of curling of magnetic field." A current loop produces a magnetic field similar to a hypothetical "short magnetic dipole" with "north and south poles". (Please remember that all magnetic fields are produced by currents and thus the poles are imaginary. No magnetic [mono]pole has been found. (at least till now)). The direction of poles of a current loop is given by the right hand thumb rule "curl fingers towards current; thumb points north". So since a single current loop produces a field like a short magnetic dipole, we might expect a solenoid with lots of current loops to produce a field like a bar magnet, and this expectation is correct. But since I said that all magnetic fields are due to current, you might wonder why a magnet produces a magnetic field. Does the magnet have currents flowing in it? The answer is yes, all atoms in a magnet have unpaired electrons spinning** and thus they all behave like permanent current loops. Now the thing is that we define direction of current to be "direction of motion of positive charges". So if the electron rotates** anticlockwise, the current is clockwise (opposite to direction of motion of electron). So the right hand thumb rule now says "curl fingers opposite to direction of motion of electron; thumb points north." So I hope you have now understood both the mechanism by which magnetic fields are created by a magnet, and the right hand thumb rule(s). Diagrams to help understanding: Magnetic field of a current loop *Actually changing electric field can also produce a magnetic field but right now I'm talking about magnetostatics only. 
**An electron does not actually rotate or spin according to Quantum Mechanics, but you can think that it is rotating both about its axis and in its "orbit". oh, ok thanks!! But one question, doesn't the direction of the thumb in the right hand rule point towards the direction of the current and not towards the magnetic field. There are many ways to use the right hand thumb rule. In one version, we curl our fingers towards magnetic field to get the current and in other we curl our fingers towards the current to get the magnetic field, and in still another, we curl fingers from the current to the field to get the force etc. Basically all these arise from the definition of the vector cross product. If you look at the Maxwell's equations, you will realise where these right hand rules come from. One more thing: see edit. edit: while writing the answer, i forgot that an electron is negative in charge and so the field will actually be in the opposite direction which the right hand rule gives. I don't quite understand..a magnet is neutral but we can still use the rule on it...then why does the charge make a difference? @MartianCactus It seems that you do not know the mathematical equations of Electricity & Magnetism. I would recommend you to read Feynman's lectures to understand it. However I will still try to explain it in my answer. can an atom have more than 1 unpaired electrons? If yes then what will happen if they don't share the same spin? @MartianCactus Yes, an atom can have more than 1 unpaired electron (I am sure about this). They will always have the same spin (I think. But you should confirm it on chemistry stackexchange). thx for the edit!! But I still don't get how the magnetic field of a solenoid is defined.. For that I guess you should ask another question on this site . . . 
(otherwise this answer shall deviate too much from the original topic) First question Electrons have intrinsic angular momentum which does not arise from its motion in physical space (that is why it cannot be derived from Schrodinger's equation) and is called (fortunately or unfortunately) the spin angular momentum. As this angular momentum has a direction, the isotropy of space is destroyed and now you can define an axis, or a direction. With reference to the figure drawn in the video link, the spin angular momentum will be pointing vertically downwards, and the top of the electron simply means the tail of the spin angular momentum vector. The magnetic moment of the electron is aligned antiparallel to the angular momentum, in this case the spin angular momentum. Thus, the magnetic moment of the electron is pointing vertically upwards. There are no magnetic monopoles, but sometimes they are convenient to picture magnetic field lines. So in terms of magnetic monopoles, it is said that the North pole is above the electron. This simply means that, for given spin angular momentum, the magnetic dipole moment is vertically upwards. Second question Yes, higher-$Z$ atoms have paired electrons. They can have more than one paired electrons. They can have paired and unpaired electrons also. Kindly see some chemistry textbook how the electrons are filled in different orbitals. They give a simplified picture of the filling of energy levels and should be sufficient to address your concerns about the magnetic moment of an atom. This video may help (honestly, I have not seen it though). what happens if it has more than 1 unpaired electrons but the spin is in different directions? Which one will the atom have? Well, the electron doesn't spin on its axis. So, when he says spin, he's basically wrong. But about the pairing thing, it means two atoms share one electron each to remain stable. 
Pairing, when talking about magnetism, means that magnetic properties of an atom depend on the number of electrons...if all electrons are paired it means that all the states are filled, we have an even number of electrons. If you add one more electron now you have an odd number of them and that lonely guy on its own gives rise to a net magnetic moment of non-zero value. Top or bottom of electron is decided depending on its magnetic moment projection i guess, that part sounds a bit clumsy. +Žarko Tomičić can an atom have more than 1 unpaired electrons? If yes then what will happen if they don't share the same spin?
STACK_EXCHANGE
7 Trends You May Have Missed About pastes An index file within a relational database is a table that contains historical data. It is sorted according to the relation to other records. In simple words, a relational database is used for information retrieval. An index tells the database administrator which order the information was accessed and changed. An index is vital to the overall security of a database, performance and reliability. But, index files can sometimes be too big to be able to fit in the available space of the main memory. Index files, sometimes known as pastes, are widely used in the vast majority of modern databases. They facilitate the sorting of large amounts of information that are related by identifying relationships between the documents. The user can paste the contents of one Document Search database into multiple index files to look for "headline" in many documents. This lets them bypass the need to manually input the text or content words of each document. This reduces time and lets users eliminate details such as text while searching for important phrases or combinations of keywords. One of the advantages of pasting information is that if the document you are seeking is already available in the database, you can find it using the hyperlinks. Index bins, also referred to as past positions are a form of index which maintains an inventory of changes that occur in a specific column over a period of time, making it easier to identify and keep track of. While traditional pasting records alter in a column, the process of incrementally pasting records changes in a single column over minutes or hours, day or month. Incremental paste systems utilize an algorithm which identifies small changes in order to make it easier for single-point changes to be identified. For instance, a user who inserts new content onto a web page will find it in the "log" that is the old form they completed. 
The incremental paste system is able to take this information and link it with the appropriate label so that it is easy to recognize later. The ability to display any number or documents within tabs is a further benefit of incremental paste systems. When the user enters text in a box, the system will identify the box and then open it to type in the needed text. The system is able to mark the area where the cursor was placed after the user has completed entering the data. The system then puts the new text on appropriate lists of positions and saves it. This process is repeated as the user adds characters within the document, and adds the required text on every page. Incremental indexing can be applied to multiple pages at the same time. The page that marks the beginning of a document's first page is known as "start". All pages that follow it are referred to as "finish." If a document is saved to a file , and the user copies it into an application it will be opened in its native format. Index allows you to select the application you want and then utilize that application to open and edit the text you've selected. Every document can be opened using the application of its choice. This allows you to choose the most appropriate one according to the format it supports. Indexing can be improved with incremental paste. It makes sure that the order of pages doesn't get lost when underlying documents change. The results of the index are constantly up-to-date, which means users can view them in their native application. Incremental paste comes with the benefit that users can see the indexed results before they are made. This allows users to judge the importance and precision of the text. Sometimes, it's difficult to find the right information from a large number of pages. Integrative pasting can be used to index a single page. FMR MS MVP has many advantages. 
One advantage is that every document that will be indexed will be scanned to ensure that it is made available to indexing systems. It allows one document to be linked to other documents using text strings. It can also join multiple documents into one to index them.
OPCFW_CODE
Fix DT header row span This PR is a replacement of https://github.com/tiagobento/kie-tools/pull/65 The effect of changes in this PR, should ensure, that the DMN Decision table headers have two different row spans. Input columns and annotation columns headers should have rowSpan equal to 2. Output clause columns should have rowSpan equal to 1. before after manual test checklist [x] selecting just input headers [x] selecting just annotation headers [x] selecting top level output header [x] select single 2nd level output header [x] select multiple 2nd level output header [x] select firstly top level output header, still drag mouse pointer, and add 2nd level output header [x] select firstly 2nd level output header(s), still drag mouse pointer, and add top level output header [x] start selecting input headers, still drag mouse pointer, add top level output header [x] start selecting input headers, still drag mouse pointer, add 2nd level output header [x] start selecting input headers, still drag mouse pointer, add top level output header, and add annotation header [x] start selecting input headers, still drag mouse pointer, add 2nd level output header, and add annotation header [x] start selecting annotation headers, still drag mouse pointer, add top level output header [x] start selecting annotation headers, still drag mouse pointer, add 2nd level output header [x] start selecting annotation headers, still drag mouse pointer, add top level output header, and add input header [x] start selecting annotation headers, still drag mouse pointer, add 2nd level output header, and add input header Hello, @jomarko ! Thanks for the PR! Manual tests: Issue 1: It's a small issue. In this image, if I navigate with the keyboard in "input-3" to the right, I go to "Decision-1". If then I press "down" I'm not able to go to "output-1": Issue 2: Strange behavior while selecting cells after a output and a annotation is added. Steps to reproduce: 1. Create a Decision Table 2. 
Add an output to the right 3. Add an annotation to the right 4. Try to select by drag from annotation-1 to input-1 Video: https://user-images.githubusercontent.com/7305741/228958065-8b95d55d-2fe9-43a1-b06e-99405f4d391d.mov @jomarko Made a review testing the PR, found some issues: The "Ctrl/Cmd + A" command to select all the cells is not working. When you hit "Ctrl/Cmd + A" on any row, the headers are selected. If you hit "Ctrl/Cmd + A" when the active cell is on the last row, it kinda works, but the rowIndex cells are being selected too. https://user-images.githubusercontent.com/1584568/230963505-f96382ae-e8b0-4a95-9781-f24a80b144e5.mov Mouse selection has erratic behavior when mixing two rows os cells. Notice how sometimes it selects two cells and sometimes it selects one. https://user-images.githubusercontent.com/1584568/230963860-1478ea82-e701-4a50-8834-ecd2fe4bfb59.mov Mouse and Keyboard selection is behaving differently. Notice how it's possible to expand the selection using the Keyboard when the rowIndex cells are selected, and how it's not possible to do the same with the Mouse. NOTE: By design, if you start a selection on a rowIndex column, you can't expand it to "normal" cells. That's how it is currently on the bee-reisizing-perf branch. https://user-images.githubusercontent.com/1584568/230963961-d8ef2772-fefe-48cb-87ce-9981a4f3daed.mov Keyboard navigation is weird on the header. Sometimes the activeCell disappears and reappears after navigating more. https://user-images.githubusercontent.com/1584568/230964192-5b6147ed-c6a0-4629-8b69-a1f6ce556f2c.mov Weird behavior when selecting header cells with the Keyboard. Notice how when I expand to the "Annotations" column, the outputs that were previously selected are removed from the selection. https://user-images.githubusercontent.com/1584568/230965274-fa5f8103-d05c-4407-b1dc-786edf096c37.mov rowSpan'ed header cell borders are not consistent with the rest of the table. 
Notice how the "Inputs" cells have a lighter border than the rest. From the code, I can tell that this new mechanism only works when there's one or two levels of nested columns. As mentioned previously, this component is the heart of many current and future initiatives. Even though we don't have a use-case for rowSpan=2 now, we most likely will have in the future. See SceSim, for example. If we're adding rowSpan capabilities, we need to do it in a way that will work for any possible case. See https://github.com/tiagobento/kie-tools/pull/89/files#diff-31e23eba3bb1e53d59a590fb00c694fb49d0ef9b3da816a28645d67c31683837R311 GENERAL COMMENTS The original intent of this PR was to make BeeTable capable of rendering rowSpan'ed columns, depending on the configured column structure. Although some unrelated changes are welcome (i.e., i18n, Java function hint fix), some changes that it introduces alter the original behavior of BeeTable, causing unpredictable and unstable behaviors that harm the overall experience. As mentioned before, a good way to approach the problem would be assembling a data structure that would hold rowSpans for each column. It's not possible to automatically determine the rowSpan of a column without looking at the entire column set. At least I'm not aware of any greedy algorithm that can do it. The original behavior of the BeeTable includes some design choices that were made when the selection mechanism was created. Such behaviors are easily observed on the bee-reisizing-perf branch. Cells are divided in three categories: Header cells Normal cells rowIndex cells A selection cannot mix cell categories. I.e., a selection either contains only header cells, only normal cells, or only rowIndex cells. Header cells cannot be part of the same selection if they're in different rows. Keyboard navigation is locked to its selection domain. I.e., it's not possible to move from a normal cell to a header cell using the keyboard, and vice-versa. 
This applies to rowIndex cells too. Pressing "Ctrl/Cmd + A" on any cell will expand the selection to the most cells it can, without breaking the rules above. Cells that are both rowIndex and header cells (E.g., controller cells) are dead-ends, meaning that if you select them with the Mouse (it's your only option, btw, as you can't reach them with the keyboard), you cannot navigate away nor expand the selection using the keyboard. These design choices have a lot of caveats, and they were made considering a fine balance between good UX, simplicity of implementation, and correctness. Of course there's room to improve, and we will do so, I just don't think this PR is the right place to do it. This PR changes these behaviors, on top of adding the rowSpan capabilities, making its scope much larger, and thus introducing unpredictable behaviors. SUGGESTIONS Remove the changes made to the selection mechanism, as most of the problems I found during my review were related to the selection. Research or develop an algorithm that will compute the rowSpan of each cell prior to rendering, for any column structure with any depth. Assign rowIndex equal to the bottom-most rowIndex that a cells spans into. This will most likely satisfy the existing selection mechanism. E.g, on the Decision Table, input columns will always rowIndex === -1. I expect that the only cell with rowIndex === -2, which is the "Outputs header" will become a dead-end cell, as there are no other header cells on its row. Fix the visual glitches, like the missing borders. Let me know if there are any questions! Build fail doesn't seem to be related to my changes:
GITHUB_ARCHIVE
Installation takes only a few seconds. Just insert the CD and follow the instructions, and you're ready to go. When you launch the program, which resembles Windows Explorer, drag-and-drop convenience allows you to copy files into and out of a CuteZip file archive. But because CuteZip integrates so seamlessly with Windows Explorer, you can access most common archiving tasks there, such as creating a new archive or extracting the contents of an existing one, with a simple right-click of your mouse. CuteZip has a tabbed interface that lets you open and work on multiple archives at the same time. A convenient file-compression history remembers the last four opened archives for quick access. CuteZip supports most major compression formats, including ZIP, CAB, GZIP, TAR, PAK, and PK3 and decompression for ACE, ARJ, and RAR. It also supports self-extracting files so that users without CuteZip can uncompress documents no matter what format they are using. Like PKZip, CuteZip also allows command line archiving. Going above and beyond your ordinary zip utility, CuteZip lets you slice up large archives, and it automatically requests new discs when one is full. With its sophisticated encryption feature, CuteZip can either encrypt archives in the widely used PKZip-compatible format (165-bit DES encryption) or use more secure 128-bit Twofish encryption. Ordinarily, a greater number of bits leads to stronger encryption, however, Twofish is harder to crack by design and works on more hardware platforms than DES. CuteZip performed admirably in our compression tests. We tested CuteZip's four levels of compression on a 900MHz Pentium III system with 256MB of RAM, running Windows XP. While comparing the different compression levels, we found that overall, the differences in archive sizes were small compared to the difference in time it took to compress the files. 
To illustrate: Using its fastest and lowest compression level, CuteZip archived a 55MB file in a blazing 10 seconds (down to 19.2MB) and a 1GB file in less than 6 minutes (down to 536MB). This is significantly faster than PKZip 5.0 Professional, which took 20 seconds to compress a 55MB file into 18MB archive and 7 minutes to zip a 1GB file into a 523MB archive. Documentation and support CuteZip offers a built-in help system and extensive online documentation, both of which thoroughly explain most functions of the program. You can also turn to the searchable knowledge base at the Globalscape Web site, although the information there is almost the same as that in the online manual. As for live technical support, you can get it for free via e-mail, but it lasts only 60 days after purchase. If you need telephone support or more technical support after the first two months, you'll have to pay. Globalscape offers a flat rate of $25 per phone call. Luckily, CuteZip is intuitive to use, and it worked flawlessly in our tests, so you'll probably get by without phone-based technical support. Overall, CuteZip has a simple and easy interface and all the standard features you would expect from an archiving program, including multiple levels of compression, self-extracting files, Windows shell integration, disk spanning, and even advanced encryption. If you need an affordable and secure way to compress your data, CuteZip is an excellent choice.
OPCFW_CODE
I have an older model WD World Book (White casing w/ Blue Rings) that is a 1 TB capacity. It stopped responding as it had many times over the years . I finally could not get it to respond on my Windows 7 machine. WD support was pretty friendly and after years of owning it , they said it was still under warranty and I could replace it at no extra charge. Except I needed to recover the data on my own. Well I decided to open the housing ( I know , I can't send it to them now) and pull the drive out and hook it up inside my computer to see if I could get it to respond. No luck. The computer would not even see it through Admin tools in Windows. I shelved the problem for a couple months and decided to get back to it the other day. The Bios saw the drive and Windows as well. It's a SATA drive by the way. For whatever reason Windows (in Drive Management tools) sees the one hard drive as 6 small drives. I have no idea why as I had never partitioned the original drive when it was in its external enclosure. I ran GET DATA BACK NTFS on the drive and it pulled up all these really small folders with numerical titles. almost 40 of them or so. There is no data in any of these that resembled what I had on the drive. My original drive had 6 Folders: Movies , Pictures , etc. None of what showed up in these small folders even matched my original contents. One folder had "REAL" player I think , which I never used to my recollection. I went back in Windows and shrunk all the partitions down to one partition and re-ran Get Data Back. Same result. Why is the Program not seeing my original content?? I don't see one glimmer of what was originally on there. I've used the program before and I don't mind waiting to find the content. Any suggestions or ideas? I appreciate the input!! If there are issues with the drive, recovery software can only do so much. It looks like the data is scrambled enough that the program can't get all the info it needs to construct the files. 
You had 6 folders, now you have 6 partitions, that's not a coincidence, Windows and the software just can't make out what the drive configuration is and is doing its best to show it to you in a readable format. The wrong format, but all it can see. You also did some more damage to the drive by moving around those partitions making the data even more messed up. #1 rule in data recovery is freeze the drive in its current form, don't install things, don't format it, don't give it to the dog to chew on. I'm guessing your drive has either partition errors and needs to be reformatted to work again, or it's a physical error and you need a new drive. I don't know if I did more damage than before. I only say this because I had Get Data Back rescan the drive with the same results after I shrunk the 6 drives back to one. I'm not saying I know what I'm doing , but the results were the same. I will certainly try more recovery programs tonight and see what I can find. The drive sounds physically fine. No noises. Pretty quiet actually.
OPCFW_CODE
I have to admit that I wasn’t a huge fan of this video, but it was really interesting to watch as a viewer, and I think it helped me understand some of the concepts that I wasn’t really sure about before. The video is the first part of a longer series called “Deep-Sea Robotics,” which is a series of videos that I’m making as a guest on the Deep-Sea Robotics YouTube channel. The videos are all about how a “deep sea fishing robot” is able to do the exact thing it’s meant to do: Fish. It basically looks like a giant squid, but only because its a giant deep sea fishing robot. The video was created by Mark Pecar, who is actually one of the founders of Deep-Sea Robotics. He also runs a consulting company called Deep-Sea Robotics. Deep-Sea Robotics is a company that I highly recommend if you want to go deep sea fishing. The Deep-Sea Robotics video explains that the fishing robot has sensors and a camera to locate the objects it’s fishing for, and it uses these sensors to scan the water and detect these objects and then relay what it finds back to the robot. In other words, the robot is using its sensors and camera to detect the fish and relay it to the robot. It then uses the robot to kill the fish. It sounds like a really cool device. The Deep-Sea Robotics video also explains that the robot is actually a “seismic robot” which means that the robot is basically a computer that does seismic surveys. That’s right. A computer that does seismic surveys. There’s a lot to like about the Deep-Sea Robotics project. You can literally use a small robot to locate a few fish in a big ocean. It’s like the future. This is the coolest video I have ever seen. I mean, I love fishing. But is that why they have robots? Because they are awesome at it? I don’t think so. Deep-sea robots are robots that work with their environment and don’t try and kill themselves. Deep-sea robotics are robots that use technology to survey the ocean floor. 
I think it’s cool that they are exploring the ocean floor, but I think it’s also sort of dangerous. You can’t control how deep you are going to go just because you have that robot. Its like driving a car that gets stuck in quicksand. I think that Deep Sea Robotics is a great idea for the future of search. I think that this is a space-industry, maybe the next big thing. I think that the best way to use robots like this would be as a way to help search engines make better suggestions for users. But even if that were the case I dont think they would be doing a good job at it. I think that with that technology, you need to be careful about how you use it. In the end, I think the best way to use robot is to use the technology to help the users and not hinder them.
OPCFW_CODE
Binary tree implementation in c++ · GitHub Learn more about clone URLs. Download ZIP. Binary tree implementation in c++. void destroy_tree(); void inorder_print() Tree C/C++ Programs - GeeksforGeeks ...tree C/C++ Program for Write C Code to Determine if Two Trees are Identical C/C++ Program for Write a C Program to Find the Maximum Depth or Height of a Tree C/C++ Program for Write a C... Binary Search Tree in C Previous. Trees in Computer Science. Binary Tree. Here, we will focus on the parts related to the binary search tree like inserting a node, deleting a node, searching, etc. Learn what a binary tree is, and how to implement one in C++ The binary tree is a useful data structure for rapidly storing sorted data and rapidly retrieving stored data. The typical graphical representation of a binary tree is essentially that of an upside down tree. This tutorial introduces you to binary search tree data structure and... A binary search tree or BST is a binary tree in symmetric order. A binary search tree can: Be empty. Have a key and not more than two other subtrees, which are called left subtree and right subtree. c++ - Destructor for Binary Search Tree - Stack Overflow I am trying to write the destructor for my Binary Search Tree and I know how to recursively loop through the tree, but I do not know how to do that in the destructor so that every node is deleted. C# Tree and Nodes Example: Directed Acyclic Word... - Dot Net Perls Develop a tree or directed acyclic graph. Each node contains references to child nodes. Example code. In the C# language a tree can be implemented with classes (nodes) that point to further nodes. Data Structure and Algorithms - Tree - Tutorialspoint Binary Tree is a special datastructure used for data storage purposes. A binary tree has a special A binary tree has the benefits of both an ordered array and a linked list as search is as quick as in a... Simple Tree Class C++ | DaniWeb Simple Tree Class C++. Home. Programming Forum. 
Recursive trees for Genetic Programming. need help in C program "of finding all possible Combinations". Binary trees - Learn C - Free Interactive C Tutorial Binary trees are used to implement binary search trees and binary heaps, and are used for efficient The operations performed on trees requires searching in one of two main ways: Depth First Search... C++/Tree Mapping Getting Started Guide C++/Tree is a W3C XML Schema to C++ mapping that represents the data stored in XML as a Besides the core features, C++/Tree provide a number of additional mapping elements that can be... Binary Search Tree in C++ | Free Source Code & Tutorials Note: We will use BST as abbreviation for binary search trees. Code is given with the tutorial separately for thorough understanding. In this tutorial, you will learn 1. About the data members of class of BST. Red Black Tree (RB-Tree) Using C++ | Coders Hub: Android Code... A red-black tree is a special type of binary tree, used in computer science to organize pieces of comparable data, such as text fragments or numbers. How to use C++ tree data structure in the competitive... - Quora Another kind of trees, are not highly attempted, or needed in competitve programming contests, but it A lot of problems need the useage of different kinds of trees. Firstly, in c++, there are the "set... K-d tree - Rosetta Code A k-d tree (short for k-dimensional tree) is a space-partitioning data structure for organizing points in a k-dimensional space. k-d trees are a useful data structure for several applications, such as searches involving a multidimensional search key (e.g. range searches and nearest neighbor searches)... B-tree set. B-trees are a generalization of binary search trees that aggregates data in blocks. This B-tree implementation is pretty standard and has no noteworthy unique features (it merely shows that...
OPCFW_CODE
"""
Base of a topic engine, routing the payload to the configured deserializer
"""
import re

from amqtt_db.base.constants import PAYLOAD_CONFIG
from amqtt_db.base.errors import TopicNotFound, NoPayloadDefinition, WrongPayloadDefinition
from amqtt_db.base.utils import get_class_func_by_name


class BaseTopicEngine(object):
    """
    Routes an incoming topic to the decoder/deserializer pair configured for it.

    Topics are configured as regular-expression patterns; once a topic has been
    resolved, the result is cached so repeated lookups are a single dict hit.
    """

    def __init__(self, parent):
        self.parent = parent
        self.topic_handlers = {}  # exact topic -> [decoder, deserializer]; also the match cache
        self.decoders = {}        # decoder class name -> shared decoder instance
        self.deserializers = {}   # deserializer class name -> shared deserializer instance
        self.topic_re = {}        # compiled topic pattern -> [decoder, deserializer]
        self.read_config(parent.config)

    def read_config(self, config):
        """
        Build a simple parse tree out of the payload config.

        :param config: The payload config
        :raises NoPayloadDefinition: if the config has no payload section
        :raises WrongPayloadDefinition: if the payload section is malformed
        """
        try:
            payload_config = config[PAYLOAD_CONFIG]
        except KeyError:
            raise NoPayloadDefinition
        try:
            for key, value in payload_config.items():
                # Each entry maps a decoder class name to its deserializer config.
                decoder_cls_name, deserializer_config = list(value.items())[0]
                decoder_class = get_class_func_by_name(decoder_cls_name)
                if decoder_cls_name in self.decoders:
                    decoder = self.decoders[decoder_cls_name]
                else:
                    decoder = decoder_class()
                    self.decoders[decoder_cls_name] = decoder

                deserializer_cls_name, types = list(deserializer_config.items())[0]
                deserializer_class = get_class_func_by_name(deserializer_cls_name)
                if deserializer_cls_name in self.deserializers:
                    # NOTE(review): a cached deserializer is reused even if this
                    # entry specifies different ``types`` -- confirm intended.
                    deserializer = self.deserializers[deserializer_cls_name]
                else:
                    deserializer = deserializer_class(types)
                    self.deserializers[deserializer_cls_name] = deserializer

                self.topic_handlers[key] = [decoder, deserializer]
                self.topic_re[re.compile(key)] = self.topic_handlers[key]
        except (KeyError, AttributeError):
            raise WrongPayloadDefinition

    def topic2handler(self, topic):
        """
        Get a handler for a topic according to the payload definitions.

        :param topic: The topic to resolve
        :return: The [decoder, deserializer] handler pair
        :raises TopicNotFound: if no configured pattern matches the topic
        """
        handler = self.topic_handlers.get(topic)
        if handler is not None:
            return handler
        handler = self.match_topic(topic)  # caches the result on success
        if handler is not None:
            return handler
        raise TopicNotFound()

    def match_topic(self, topic):
        """
        Match a topic to a handler according to the config.

        The first pattern that matches wins; the result is cached in
        ``topic_handlers`` so later lookups skip the regex scan.

        :param topic: The topic to match
        :return: The handler pair, or None if nothing matches
        """
        for regex, handler in self.topic_re.items():
            if regex.match(topic):
                self.topic_handlers[topic] = handler
                return handler
        return None
STACK_EDU
Upstart, one job definition to start and one to stop Hi i just discoverd ubuntu upstart (etc/init/* scripts). And i would like to use it to run a particular application. This app is run via a command line, but there is an argument to start an instance of the app and another argument to stop it (let say 'start' and 'stop') Is there a way to use upstart for that ?? I don't really understand what is the problem... Can you be more specific? In particular... why can't you specify an argument: myapplicattion --start when you want this triggered, and --stop in the event(s) you want it stopped. Also, you can run a script instead of directly call the application. In that script you can do whatever you need: check if it's running, start it, stop it... I dont know how to express ./script start (for start event) and ./script stop (for stop event). Or if it's easier i could run ./scriptStart and ./scriptStop. But i want both on the same job definition. If this may help, the script i want to run create a deamon. So alfter running the script, the shell give me back the controle (sorry for my english) It should be as simple as creating a file /etc/init/myjob.conf: description "My Job" # start conditions for automatic start # stop conditions for automatic stop pre-start exec /path/to/program start post-stop exec /path/to/program stop You should then be able to do sudo start myjob to start and sudo stop myjob to stop. See http://upstart.ubuntu.com for some more info. ok but i read this 'Additional shell code can be given to be run before or after the binary or script specified with exec or script. These are not expected to start the process, in fact, they can't. They are intended for preparing the environment and cleaning up afterwards.' here http://upstart.ubuntu.com/getting-started.html True that. On the other hand, /etc/init/ufw.conf seems to do exactly what's strongly discouraged there. :-/ ok, you are totally right. I'll give it a try. 
Given the info from your link I'm totally wrong. :D I just tested this with a small script and it seems to work, but if that's not the recommended way, then my answer is crap. It works, but it slows down boot up. The idea is that upstart reads all of its init/*.conf files in at one time, and then there's no more random scripts all over the filesystem to read/execute. If all you want to do is start/stop the service at boot time.. then drop your script in /etc/init.d and run update-rc.d yourscript defaults Or if that complains update-rc.d yourscript start 20 2 update-rc.d yourscript stop 20 0 update-rc.d yourscript stop 20 1 update-rc.d yourscript stop 20 6 Both of which will add it to the bootup/shutdown. Note that this means it will run as root.. so you may want to make sure the script switches users to your user or another one that you want this program running as. Upstart is meant to replace such scripts if at all possible. The motivations for this are faster boot and a more clear encoding of the information needed to know when to start/stop a service. It achieves the faster boot specifically by only having to read the contents of /etc/init to get started, which is especially beneficial when you add in ureadahead to the boot process, as the less random tiny script files ureadahead has to cache, the more it can spend time on important things like libraries and programs needed to start the system up. I'd bet that the script you're running does a few things to setup the environment, then runs the program in a daemonized mode. Your upstart job should seek to replace the script's logic, not just run the script. I tryied before using upsatart to use script in /etc/init.d But the script in there are much more complexe than these in /etc/init/ And about the script i use in my upstart script, i do not writ this one, in come with my app server(the app i want to run at start up). 
I guess you'll want it to start or stop on different events, so it'll be easier to set up two different jobs. If the same event toggles your app, then you could set up a job with a script that checks whether it's running or not, and hence starts or stops your app. ok, seems good to me. How could I check this status inside of a job definition, Is there a normalised way? initctl list will list all jobs and if they're running or stopped. So you could use script within the job instead of exec.
STACK_EXCHANGE
In previous posts, we looked at setting up authentication with OAuth to access FreeAgent’s API. We’ve got something working but a couple of caveats remain when working with it from a rich client. To summarise the workflow; - Ask your user to authorise your application (on the target application’s servers). - You’ll be given an authorisation token from the above. Stash it. - Exchange your authorisation token for an access token. Stash this too (along with the refresh token). - Make requests passing along the access token to prove you’re you. The Authorisation Request It’s not always clear, but step 1. above is a one time operation. You don’t make this request every time your programmatically want to access the target application. It also implies that the GET request is made from the browser. There are “out of band” options but in-browser is the simplest. The Access Token Request Again, it’s not always clear but the access token request only needs to be made once. In fact, if you’ve successfully retrieved an access token and then request a new one, FreeAgent will error with a basic authentication failure. HTTP/1.1 401 Unauthorized Server: nginx/1.0.14 Date: Mon, 13 Aug 2012 18:13:44 GMT Content-Type: text/html; charset=utf-8 Status: 401 Unauthorized WWW-Authenticate: Basic realm="Application" X-UA-Compatible: IE=Edge,chrome=1 X-Runtime: 0.099212 X-Rev: 9301db5 X-Host: web3 HTTP Basic: Access denied. I think it’s trying to say that your application isn’t allowed to request a new access token whilst one is already valid. Refreshing the Access Token In a successful exchange of authorisation code for access token, you should see a response like this. In OAuth, The expires_in value should be the time in seconds that the access token is valid. RECOMMENDED. The lifetime in seconds of the access token. For example, the value “3600” denotes that the access token will expire in one hour from the time the response was generated. 
If omitted, the authorization server SHOULD provide the expiration time via other means or document the default value. 604800 which is consistent with their documentation as it works out as 7 days. As this countdown starts when you exchange the tokens, I convert the number into a concrete date when I get the response. That way, I can see later if I actually need to refresh the token. However, it seems that you can refresh your token at any point. The process is similar to the requesting the original access token. Make a Basic auth HTTP POST but with a slightly smaller body. POST /v2/token_endpoint HTTP/1.1 Authorization: Basic Y2xpZW50X2lkOmNsaWVudF9zZWNyZXQ= Accept: application/json Content-Type: application/x-www-form-urlencoded User-Agent: Java/1.6.0_33 Host: api.freeagent.com Connection: close Content-Length: 127 grant_type=refresh_token&refresh_token=12wXjd7SL7SLOE1sdsaX8oCgix which will return something like
OPCFW_CODE
WebGL shader checking status of texture sampler2D I want to have prepared shader component (for multi sampler tex) In my current state i use (activate and bind) only 2 texture image. But this line : gl_FragColor = textureColor + textureColor1 + textureColor2; Makes trouble with my texture view as the texture I sample textureColor2 from is not bound. In shaders its not possible to use console.log or any other standard debugging methods.I am interested to learn more about shaders but i am stuck. Code : ... precision mediump float; varying vec2 vTextureCoord; varying vec3 vLightWeighting; uniform sampler2D uSampler; uniform sampler2D uSampler1; uniform sampler2D uSampler2; uniform sampler2D uSampler3; uniform sampler2D uSampler4; uniform sampler2D uSampler5; uniform sampler2D uSampler6; uniform sampler2D uSampler7; uniform sampler2D uSampler8; uniform sampler2D uSampler9; uniform sampler2D uSampler10; uniform sampler2D uSampler11; uniform sampler2D uSampler12; uniform sampler2D uSampler13; void main(void) { vec4 textureColor = texture2D(uSampler, vec2(vTextureCoord.s, vTextureCoord.t)); vec4 textureColor1 = texture2D(uSampler1, vec2(vTextureCoord.s, vTextureCoord.t)); vec4 textureColor2 = texture2D(uSampler2, vec2(vTextureCoord.s, vTextureCoord.t)); // Need help here gl_FragColor = textureColor + textureColor1 ; //gl_FragColor = textureColor + textureColor1 + textureColor2; //UPDATED QUESTION if ( ${numTextures} == 1) { gl_FragColor = textureColor; } else if (${numTextures} == 2) { gl_FragColor = textureColor + textureColor1; } else if (${numTextures} == 3) { gl_FragColor = textureColor + textureColor1 + textureColor2; } // i use simple pragmatic if else for now . 
// i pass value to the shader on load // i still cant update shader in run time /////////////////////////////////////// // This is segment of draw function : for (var t=0;t<object.textures.length;t++) { eval( " world.GL.gl.activeTexture(world.GL.gl.TEXTURE"+t+"); " ) world.GL.gl.bindTexture(world.GL.gl.TEXTURE_2D, object.textures[t]); world.GL.gl.pixelStorei(world.GL.gl.UNPACK_FLIP_Y_WEBGL, false); world.GL.gl.texParameteri(world.GL.gl.TEXTURE_2D, world.GL.gl.TEXTURE_MAG_FILTER, world.GL.gl.NEAREST); world.GL.gl.texParameteri(world.GL.gl.TEXTURE_2D, world.GL.gl.TEXTURE_MIN_FILTER, world.GL.gl.NEAREST); world.GL.gl.texParameteri(world.GL.gl.TEXTURE_2D, world.GL.gl.TEXTURE_WRAP_S, world.GL.gl.CLAMP_TO_EDGE); world.GL.gl.texParameteri(world.GL.gl.TEXTURE_2D, world.GL.gl.TEXTURE_WRAP_T, world.GL.gl.CLAMP_TO_EDGE); // -- Allocate storage for the texture //world.GL.gl.texStorage2D(world.GL.gl.TEXTURE_2D, 1, world.GL.gl.RGB8, 512, 512); //world.GL.gl.texSubImage2D(world.GL.gl.TEXTURE_2D, 0, 0, 0, world.GL.gl.RGB, world.GL.gl.UNSIGNED_BYTE, image); //world.GL.gl.generateMipmap(world.GL.gl.TEXTURE_2D); world.GL.gl.uniform1i(object.shaderProgram.samplerUniform, t); } ... Maybe in run time best way is to manipulate with object.textures array ?! Finally : Override shader with new flag Compile shader New material is updated you probably want to be mixing your colours rather that summing them. I have many shader examples HERE, and some on my jsfiddle HERE. Note though, the examples are for older versions of three.js and the way the lighting information is sent to shaders has changed. Thank you , nice examples i will explore this for sure...
STACK_EXCHANGE
Blogger just introduced a new Dashboard in Blogger in Draft. Here's the main part of it. And, to compare, here's the old dashboard: I went to the Blogger in Draft blog to post a complaint but saw that plenty of people had beaten me to it. And what are we all complaining about? I have to say I don't have any idea why they made this change. Yes, it looks a little nicer, but I can't figure out who they thought they were going to help by only showing the two most recent blogs. And they used a magic number of two -- where did that number come from? Let's look at potential users: - Bloggers with 2 blogs or less -- these people won't even notice the change. - Bloggers with 3 to 5 blogs -- these people probably rotate around their blogs, for a variety of reasons. For none of these people, can I imagine that "most recent 2 blogs" is a good set. For myself, I'm more likely to want the blog I posted on least recently to show up, as a reminder that I've been remiss in posting. - Bloggers with 6 or more blogs -- these people are power users and probably have a variety of usage patterns, so there may be no way to satisfy all of these people (but we should try). I was initially surprised that, when I clicked Display all 3 blogs, they didn't remember that. In retrospect, however, that's correct. If they're hiding things and the things they're hiding changes over time, then I suppose they shouldn't remember that you hid anything. But that's a big IF and I think it's a flawed assumption. What's the solution? Well, there's the obvious one, proposed by a number of people commenting on the blog post about the new feature -- add an option to always show all the blogs. But, I've said before that every option we add to a user interface is a decision that we were unable to make during development. So, can we just do something that is so obviously right that everybody will just think it is the right thing? Sure we can. First, we need to know what the objective is. 
I think the goal was to make cleaner, simpler, and smaller for users who have blogs that they're not posting to. I think they forgot easier to use, which should always be a goal because, if we forget that, it is way, way too easy to add features that actually make our products harder to use. And hiding things should never be a goal -- it should only be a way to accomplish a benefit, which it fails to do here. Here's what I think they should do: - Keep the new look -- it's fine. - Show all the blogs, in most-recent-to-least-recent order. - Collapse all blogs that have not been "touched" in the last month. - Provide a expand/collapse icon or button to allow the expansion of any blogs that are collapsed. The collapse icon is probably not necessary, but having it there for symmetry is a good idea. A change like this would not even be noticed by most users -- it would just feel right. Users with multiple, active blogs will see no change. Users with dormant blogs will have them taking up less space and won't mind that. Even power bloggers with lots of blogs, will get the right behavior for both their active and dormant blogs. It's a win all around. - Focus on the real goals of your UI change. - Make sure that "ease of use" is always (or almost always) part of your goals. - Aim for a change that just feels right -- that people will just say "that's the way it should have been in the first place"
OPCFW_CODE
Deterministic encryption for a limited space: using HMAC as IV I need a deterministic encryption scheme. Objectives: Same plaintext is always encrypted to the same ciphertext. Related plaintexts are encrypted to unrelated ciphertexts. (For example, an attacker that knows two ciphertexts cannot learn whether corresponding plaintexts start with the same prefix.) Minimum space overhead. Authenticated encryption, of course. There are also some conditions that can make it easier: I will encrypt small chunks of data (few dozens of bytes). It is OK to store them in RAM and then start the encryption. The data will be padded to the same length all the time. So, we can conclude that the length of the data is some constant. Not high performance requirements. My first idea was to derive IV from plaintext using HMAC and then use some authenticated encryption that requires just a unique (potentially predictable) IV, like AES-GCM. This requires both IV and authentication tag to be stored alongside the encryption. But I have an idea how to reduce the space overhead even more: Use (H)MAC of plaintext as both IV and authentication of the message. I know this is a generally dangerous scheme close to encrypt-and-mac, so I specify further requirements for primitives to mitigate its risks: The encryption will not leak anything when decrypting attacker-manipulated data. Stream ciphers like AES-CTR look suitable here, because there are no risks related to padding oracle. It just “decrypts” any attacker-provided garbage to another garbage (rejected by MAC check later) without any side channel if the AES-CTR implementation is correct. The encryption requires just unique IV, but it does not have any other requirements. Still, AES-CTR looks suitable here. The MAC does not leak any data. HMAC (potentially truncated to 128b) looks suitable there, because failing to do so would at least weaken preimage resistance of the underlying hash function.
Encryption uses a key completely unrelated to the authentication key, of course. (This requirement might not be strictly needed when using, say, AES-CTR with HMAC-SHA256. However the requirement makes reasoning about security easier.) The whole scheme looks somewhat exotic to me, but I believe that I have resolved risks arising from the unusual design. Is there any existing work that aims to do the same? Is there any research that would tell me anything about security of this scheme? @DiscobarMolokai Good point. After a brief look, it looks much like my design, but with some CMAC-based construction instead of HMAC. The only issue I see there is that it seems to be hard to find usable implementations of SIV. I've seen some that have their own AES implementation in C, apparently not relying on AES-NI. I am afraid that many such implementations will be vulnerable to side channels. @IlmariKaronen Thank you for the link, I was unable to find it. (I googled for “hmac as iv”, with no suitable result.)
STACK_EXCHANGE
remove unused black pre-commit hook and blacken docs Summary of changes pre-commit is configured in this repo. This means whenever I commit I automatically apply these fixes. Because pre-commit is not run in CI the compliance has drifted, and so when I commit I get a huge number of changes! Closes Pull Request Checklist [ ] Changes have tests [ ] News fragment added in changelog.d/. (See documentation for details) @graingert Hi. I'm not the mainteiner of this repository, but I'll give you some advice. Your commits contain too many redundant codes. So we can't know what the purpose of this PR is and what it's modified. If you are using VScode, I think the python linter works automatically when you save the files. You should turn off the option in VScode settings, and then push the commits again. @graingert Hi. I'm not the maintainer of this repository, but I'll give you some advice. Your commits contain too many redundant codes. So we can't know what the purpose of this PR is and what it's modified. If you are using VScode, I think the python linter works automatically when you save the files. You should turn off the option in VScode settings, and then push the commits again. Hello, first thanks for the advice! However in this case I was simply running "git commit" when these automated changes were applied by the configuration in this repository @graingert Hi. I'm not the maintainer of this repository, but I'll give you some advice. Your commits contain too many redundant codes. So we can't know what the purpose of this PR is and what it's modified. If you are using VScode, I think the python linter works automatically when you save the files. You should turn off the option in VScode settings, and then push the commits again. Hello, first thanks for the advice! However in this case I was simply running "git commit" when these automated changes were applied by the configuration in this repository Oops.. 
I thought the redundant codes was included using the linter, not Black. But I just run Black with these files you modified, and found out you were right. I'm not sure if it's okay to include this change because I'm not the maintainer of this repository, but I'm sorry I gave you advice without even trying and even thinking... @jaraco I have a question! As I talked to @graingert above, can unnecessary codes caused byBlack be included in the commit? Unnecessary here means that it is different from the original purpose that it was intended to be modified. This repository stated that Black is used as the main linter, but there are actually files that are not applied with Black. I'd also be happy to draft a new PR with my doc syntax error fixes, and the black configuration removed There are many things going on here. This project derives from jaraco/skeleton, so that’s where the black configuration comes from. It’s is meant to be disabled because this project hasn’t actually adopted black yet, though I plan to do so soon. I don’t experience the problems because I haven’t opted in to pre-commit for my local checkout of this repo. Is that an option for now (opt out of pre-commit for Setuptools) or do you have another tool that automatically installs precommit? @jaraco I use existence of .pre-commit-config.yaml as a call to action to use it @jaraco I use existence of .pre-commit-config.yaml as a call to action to use it Totally fair and reasonable. I guess what I'm asking is that you disregard that call temporarily for this project. I think it's still worth applying blacken docs though I'm slightly opposed to running pre-commit in CI. It will add a new workflow and thus noise to the UI. For example, it will double the number of lines that appear under the actions list: And while that might seem like a minor annoyance, it's caused me real difficulty when I go to that page to check out the status of a push or tag. 
When I quickly glance at that page, I have to mentally filter out "pre-commit" lines. I've previously had to do this for a different workflow I'd enabled called "automerge", which I recently removed because it was not useful, and I was relieved that I no longer had to mentally filter out those unhelpful workflow results. That said, if the best thing to do for a project that has pre-commit hooks is to run them in a workflow, I may consider doing that, but I'd want to do it in jaraco/skeleton, so I'm not having to manage it for each project separately. I can see that I added the pre-commit hooks in https://github.com/jaraco/skeleton/commit/d0f07a4e7ad465b0935bf85da94b12b9b8cc2e77#diff-63a9c44a44acf85fea213a857769990937107cf072831e1a26808cfde9d096b9. Unfortunately, I didn't leave myself enough breadcrumbs to track what instigated me to add that. I think someone suggested in another project that it's something every project should have and so I adopted it experimentally. I'd like to ponder this some more, but I appreciate you sharing the suggestion. Ok I deleted the extra workflow. I still think it's worth getting doing the doc fixes here I agree that a lot of the changes herein are worthwhile. I'm a little less certain about others. In particular, there are two changes that blacken-docs makes with which I'm uncomfortable and likely to revert: The conversion of multi-line expressions to single-line expressions clumps the arguments together, making them less distinct and discernible. Especially with documentation, whitespace can really help to convey an idea or to set the stage for a larger block. Consider for example: In this block, the newlines are there to express that the user should probably have their parameters one per line. Just because they happen to fit on one line is not a good approach, especially when additional parameters are probably going to force the syntax (even when black) to be on multiple lines. 
For actual code, the flux of these transforms is acceptable. For docs, it's an impediment to their core purpose. The forcing of the more heavy double-quotes (both by pixels and by keystroke) in contradiction with Python's default repr is unnatural and violates this project's specified opt-out for that change. Unfortunately, due to asotille/blacken-docs#62, blacken-docs doesn't support the few preferences that black supports. For this reason, I'm going to remove blacken-docs from the pre-commit builds (at the skeleton). I would like to keep the other changes. Thanks for the contrib! For that specific example you can use ..., to force black to explode that line It's probably worth finding an alternative to check the syntax of these docs
GITHUB_ARCHIVE
Today, I had to execute a security scan against some of mine Red Hat hosts and surprisingly (at least to me) the results were not what I have expected ……. Not to mentioned that the side effect was my AD account being “ LOCKED OUT ON THIS DOMAIN CONTROLLER” preventing me from log-in to over one hundred of hosts. Looking at the report documenting the offenses, I recognize that it is not that “my” hosts are at fault but it is the “scanner” fault, of course! Apparently, McAfee “looks” not just for the running but all LINUX kernels present on a host. So even if I did yum -y upgrade and immediately followed it with another scanner run the process will flag this host as a “failure” because of the presence of the older kernels. It comes back to me now. Years ago, when I worked with the Interactive UNIX (the origin of SUN and AIX) I had to deal with multiple kernels – once or twice I had to remove some to gain back storage capacity on a host. You may already know the question of today but if you don’t do not worry too much – here it comes: “how to list the kernels and how to remove them from a RedHat machine?” To list kernels on a RedHat host, execute: # rpm -qa kernel kernel-2.6.32-279.19.1.el6.x86_64 kernel-2.6.32-279.14.1.el6.x86_64 kernel-2.6.32-279.11.1.el6.x86_64 To list your current kernel (the short version): # uname -r 2.6.32-279.19.1.el6.x86_64 To list your current kernel (the long version): # uname -mrs Linux 2.6.32-279.19.1.el6.x86_64 x86_64 The last two entries tell us that the running 2.6.32-279.19.1.el6.x86_64 kernel (active) is the most up to date one. So to remove the other (non active) kernels, I have to execute these two steps: # rpm -e kernel-2.6.32-279.11.1.el6.x86_64 # rpm -e kernel-2.6.32-279.14.1.el6.x86_64 To verify that there is just one kernel left – the one I wanted to keep: # # rpm -qa kernel kernel-2.6.32-279.19.1.el6.x86_64 Is there a way to switch kernels on a live RedHat hosts so when it boots next time it uses a different kernel? 
I know that a kernel selection can be made at boot time. Do you know about any other way? If so please let us all know too, thanks! I feel, this post would not be complete without this message: To install kernel packages manually, use "rpm -ivh [package]". Do not use "rpm -Uvh" as that will remove the running kernel binaries from your system. You may use "rpm -e" to remove old kernels after determining that the new kernel functions properly on your system.
OPCFW_CODE
I pay for 30/5 internet and my down lately has been super slow on my PC. hitting as low as 0.4Mbps up to 34Mbps... over the last few weeks its been hovering around the 4 to 17 range. I monitored it closely one night for 4 hours and This Was The Result. So after contacting my ISP and they came out we realized that this was my problem and not theirs. more specifically... it was my single PC getting low speeds. NOTHING ELSE. My phone pulls 34Mbps consistently through the WiFi on the same network. My laptop pulls 34Mbps Hooked up to the modem and via WiFi My Tablet pulls 30Mbps through WiFi My computer pulls crap from the router or hooked up to the modem. I have installed the most up-to-date drivers for my NIC and the ones that came with the mobo disc and still crap. I have uninstalled it and reinstalled the NIC and nothing I just reinstalled windows yesterday and still nothing i bought a wireless USB adapter and STILL pulling around 10Mbps right now. and yes i tested this with ONLY the computer connected and no devices. I have checked all my cables and ruled out any damaged cables (even though since the WiFi is working for everything else i know my problem is not in my router or modem.) the problem exists on my computer alone regardless of connection type or path and is pulling crappy numbers from a network that is putting out great numbers... so the problem has to be internal on my computer....... but what??? OS:-----------Windows 7 Ultimate x64 MoBo: --------ASRock FM2A75 Pro4-M Processor:----AMD A8-5600K APU Radeon HD 7560D Graphics 2:---AMD Radeon HD 7850 Mem:----------Corsair Vengeance 16GB (2x8) BIOS:---------American Microtrends vp2.10 3/14/2013 NIC:----------Realtek PCIe GBE Family Controller I updated to the latest Atheros 8152/8158 drivers v 220.127.116.11. Download speed is fine, upload speed is not. Should be a 50 down/50 up Mb connection. Getting 47mb down which is great, getting.54 mb up which is horrible. 
Tried a different laptop on the same ethernet cable, speeds were great up and down. The problem only exists on one laptop. I posted about this previously ~2-3 weeks ago here, and while the posted fix (flushing DNS) worked for some time, my internet (via wifi) has reverted back to speeds between 15-30mbps when it should be 180+mbps (other PCs, phones, tablets all posting this figure via Fast.com and Speedtest.net). I have tried that same fix again, along with using countless other lines in the command prompt suggested in articles like this or this, and nothing has resolved the persisting issue. Other things I have checked/tried include: Plugging directly into the router (fast as expected) Swapped out the wifi card for a new one (same speeds) Tried a different PCI-E port (same speeds) Disabled Windows 10 background updates (no effect) What could I possibly be missing? I do not have too many devices connecting, as I have removed other devices on the network, there's no issue with the PSU or with grounding, it's not a driver issue (reinstalled drivers each time I tried a new card). 100% Free Computer Speed Up Thanks for the help!
OPCFW_CODE
Join Citi’s Markets Global Technology Team Citi’s Markets technology team in Russia is growing at lightning speed, and we’re looking for talented technologists to help build the future of global banking. Our teams are creating innovations used across the globe. As the world’s most global bank, Citi provides an opportunity to touch every corner of the globe and deal with incomparable trade volumes, scale and impact. We’re looking for new, innovative ways to push the boundaries of financial technology. Citi’s global technology team spans 84 countries, with almost 30,000 colleagues managing thousands of projects across the globe. Russia team is young but rapidly growing. Join an environment with a laser focus on growth and progress, and take your career to the next level through the power of Citi’s unmatched globality and vast expertise. We are currently building a new team that will be responsible for FX Risk platform. The team will be implementing new business-driven functionality, technical and regulatory-driven changes, QA Automation and DevOps. There is a set of old and new platforms that serve the Client’s FX business in 38 countries by providing the ability for real-time derivatives pricing, risk management and data analytics. New initiative is starting to replace a set of the existing risk applications with new generation ones with web-based UI, server side computing and grid technology, distributed in-memory solutions. The new application is going to serve traders, sales and external clients. Solution needs to be scalable to cover multiple locations and meet latency requirements. We have three main challenges: 1. Performance: we calculate a lot of analytical numbers on our grid farm (10к+ CPUs), and to handle them quickly we have to keep them distributed in-memory caches. There are hundreds of gigabytes in-memory data that needs to be aggregated and cooked for user requests under one second. Knowing algorithms complexity is a must for us. 
Knowing how JVM/JMM/GC work is a must. Core Java is the main working tool that allows us to value or interpolate millions of trades under one second. 2. Capacity: This type of system is core to Investment Banking as a result it is super critical to have perfect stability, it is super critical to be able to scale quickly in case of volatility spikes on the market. No one needs a system that will not work during big events. That is why we do care about fault tolerance, we do care about the ability to deploy quickly, so use clouds, microservices and we try to have real continious delivery. 3. Business complexity: There are lot of complex things in this domain area, you will need to understand how all this works, how math models for risk computation are working. Sometimes you need to find workarounds in logic to be able to do things that at the very beginning looks impossible, like moving from O(n^2) to O(n) for some business problem. Apart from that, there are IT complexities that we can hide and improve not only for this exact project but for the whole bank. We are looking to build a strong team of professionals, with different skill sets. It is really not that important if you don't know how one or another library work, for us - Core Java, DataStructures, Algorithms knowledge is far more important. At the same time, it would be great if you have experience with things we are using: Kafka, Apache Ignite, Microservices, Docker, Distributed Computing, Protobuf, Netty, JS/React, Python Ping us, if you think these challenges are interesting for you... What we offer: У нас нет доступа к вашему номеру телефонаВойти через email
OPCFW_CODE
I started to look at some of my pfsense router logs and saw a HUGE spike in DNS traffic around midnight (was tossing and turning in bed but not using any devices). 99% of this traffic was my OP6 phone hitting port 53 on my router’s IP. What could be the cause of this? Also what makes a device hit DNS on the router or strait out to a server such as 22.214.171.124? Default DNS settings, I had to rebuild my pfsense and haven’t re-setup DNS over TLS yet. pfblockerNG is setup along with DNSBL Is this simply pfblockerNG + DNSBL? And if so, why a ‘storm’ of DNS requests (I’m assuming requests)- sh*tty app? The default DHCP settings for PFSENSE hand out your (pfsense) router’s IP address as the DNS server for your LAN. DHCP is responsible for giving a device its DNS settings if the device is set to obtain automatic network configuration. If the device has hard-coded google DNS for an app or service running on it, it will perhaps hit that instead. As to why the storm happened… maybe midnight is some sort of update schedule for your phone. Maybe its owned? Who knows :D. If you’re curious see if there’s a trend and as above do a packet capture. Also… potentially, if your phone updates automatically over night, it could be the case that it re-started and thus DNS cache expired, and all the locally cached results that were in memory on it were no longer cached. And thus in order to deal with that, rather than local cache hits everything running on it needed to do an initial look up. I have Google play set to manual updates, and I upgrade about once every two weeks. I didn’t think to also check my pfblocker dnsbl logs as well, would be interesting to see- I might have derped and not set that to log and it’s why all the traffic was action=allowed to the router IP but then maybe blocked by a floating rule for add IPs (not logged)? 
If you can do a packet capture while it’s happening you’ll be able to see what domain the phone is trying to look up, that might help figure out if it’s an issue or not. I used to have a handy mirrored port on my switch that was connected to a vm that I could use to quickly packet sniff any interface on the network. It was super handy for stuff like this but just doing a packet capture should be fine especially if its happening at a predictable time. There might also be a logging option on the pfsense dns server that you could use to see what’s being looked up. Yeah, auto configuration for both IP address and DNS is handled by DHCP. DHCP can also set a heap of other client device options such as time server, local domain, PXE boot host, proxy configuration, etc. But most people only use it for IP+DNS server; and unless tweaked, pfsense’s default DHCP scope settings will dish out itself as the DNS server unless you configure otherwise. A device hitting local DNS a lot isn’t a problem. Most devices don’t use a local DNS cache because it’s actually cheaper to just hit the local server. It’s 2 or 3 milliseconds vs potentially holding megabytes of DNS cache in local RAM. Aliens! More seriously, I can only guess which is a waste of time. Do you control the dns traffic? What domains per device are resolved … I personally block all traffic 53 and the devices only have the right to use the pihole. Do you use fw on your phone, something like NetGuard? Check which application is performing specific queries. I block everything and only allow what I need at the moment. I can also see exactly the traffic that the app generates. If you know the domain names and which application or part of the phone is making this move, it will then be possible to determine what is on the wall. So find out what generates the queries to understand why this is happening. 
More and more applications for both PC and Android have dns servers sewn in and make additional connections without looking at the system NS. Among other things, I mentioned Avast in this post … Thanks all for the inputs- unfortunately the traffic data logging was not on at that time (in my rant thread Suricata had a bug that filled up my pfsense’s HDD). I can re-enable packet captures or the less granular but darn near as much Suricata eve.json for the ‘next’ time. For now I’m going to assume some app’s server went down for a while and caused the app to freak out a bit, and not dwell on ideas of some kind of compromise.
OPCFW_CODE
An audio plug-in, in computer software, is a plug-in that can add or enhance audio-related functionality in a computer program. Such functionality may include digital signal processing or sound synthesis.[page needed] Audio plug-ins usually provide their own user interface, which often contains GUI widgets that can be used to control and visualise the plug-in's audio parameters. There are three broad classes of audio plug-in: those which transform existing audio samples, those which generate new audio samples through sound synthesis and those which analyze existing audio samples. Although all plug-in types can technically perform audio analysis, only specific formats provide a mechanism for analysis data to be returned to the host. The program used to dynamically load audio plug-ins is called a plug-in host. Example hosts include Bidule, Gig Performer, Mainstage, REAPER and Sonic Visualiser. Plug-ins can also be used to host other plug-ins. Communication between host and plug-in(s) is determined by a plug-in API. The API declares functions and data structures that the plug-in must define in order to be usable by a plug-in host. Additionally, a functional specification may be provided, which defines how the plug-in should respond to function calls, and how the host should expect to handle function calls to the plug-in. The specification may also include documentation about the meaning of variables and data structures declared in the API. The API header files, specification, shared libraries, license and documentation are sometimes bundled together in an SDK. 
List of plug-in architectures |Name||Developer||License||GUI support||Supported types||Supported platforms| |Rack Extension||Reason Studios||BSD-style||Yes||Transformation and synthesis||Mac OS X and Windows| |Virtual Studio Technology||Steinberg||Proprietary or GPLv3||Yes||Transformation and synthesis||Mac OS X, Windows and Linux| |Audio Units||Apple||Proprietary||Yes||Transformation and synthesis||Mac OS X, iOS and tvOS| |Real Time AudioSuite||Avid||Proprietary||Yes||Transformation and synthesis||Mac OS X and Windows| |Avid Audio eXtension||Avid||Proprietary||Yes||Transformation and synthesis||Mac OS X and Windows| |TDM||Avid||Proprietary||Yes||Transformation and synthesis||Mac OS X and Windows| |LADSPA||ladspa.org||LGPL||No||Transformation||Mac OS X, Windows and Linux| |DSSI||dssi.sourceforge.net||LGPL, BSD||Yes||Transformation and synthesis||Mac OS X, Windows and Linux| |LV2||lv2plug.in||ISC License||Yes||Transformation and synthesis||Linux, OS X, Windows| |DirectX plugin||Microsoft||Proprietary||Yes||Transformation and synthesis||Windows| |VAMP||vamp-plugins.org||BSD-style||No||Analysis||Mac OS X, Windows and Linux| |CLAP||Bitwig and others||MIT-style||Yes||Transformation and synthesis||Mac OS X, Windows and Linux| - ^ Collins, Mike A. (2003). Professional Guide to Audio Plug-ins and Virtual Instruments. Burlington, MA: Focal Press. ISBN 9780240517063. - ^ a b Goudard, Vincent; Müller, Remu (June 2, 2003), Real-time audio plugin architectures (PDF), IRCAM, p. 8 - ^ Cannam, C. 2008., The vamp audio analysis plugin api: A programmer’s guide. . Revision 1.0, covering the Vamp plug-in SDK version 1.2. 51 - ^ Gibson, D. and Polfreman, R., 2011. "An Architecture For Creating Hosting Plug-Ins For Use In Digital Audio Workstations.", In: International Computer Music Conference 2011, 31 July - 5 August 2011, University of Huddersfield, England. 
- ^ VST SDK - ^ VAMP SDK - ^ Reason Studios Rack Extension SDK - ^ Reason Studios Rack Extension SDK License - ^ "VST 3 SDK License". February 23, 2017. - ^ "Welcome to VST SDK 3.7.x". GitHub. February 21, 2022. - ^ "Apple Developer Documentation". - ^ github.com/free-audio/clap
OPCFW_CODE
In this short note, I want to touch on the topic of collaborative confidential computing and try to briefly outline the essence of these approaches and dispel several ambiguities that have developed in the interpretation of this term in the modern information field. Hope it works 🙂 I will start a little from afar, in general I am interested in the topic of distributed data processing with confidentiality, in particular, I am actively looking at the development of such a direction as Federated Learning. I often come across articles and materials on this topic in which I observe some terminological confusion, since the terms Federated Learning and Confidential Computing are often used as synonyms, but this is not entirely true. Maybe I’m not quite right, but the set of methods for “learning” (learning) and for “computing” are actually different and are not a subset of each other. Therefore, first of all, I want to speak about my understanding of their fundamental difference: Federated Learning – a set of methods, as a result of which we get a trained mathematical model (which is used for inference on new data sets) and they are closely related and inseparable from machine learning methods. These methods solve such applied problems as training a scoring model based on data from different banks, without exchanging data about borrowers, etc. Confidential Computing – a set of methods, the purpose of which is the joint processing of data, that is, the direct use of data in the calculation (find the average, sum, etc.) of some mathematical expressions in order to obtain the exact result of mathematical calculations. For example, these methods solve such applied tasks as obtaining a sales report for the entire market as a whole based on data from different retail chains, each of which does not want to disclose its sales volume, etc. Sorry for the possible clumsy wording, but I wanted to focus on these fundamental differences. I hope you feel the difference. 
But in fact, if you delve into the subject in more detail, then even more advanced methods and terms like “differential privacy” and so on appear, but the creation of a complete anthology of this area is probably the topic of a separate article, and within the framework of this material, I would like to focus on on the above two classes of methods. The key conclusion here is – do not confuse Federated Learning and Confidential Computing – these are different methods for different classes of tasks, although they solve the same problem. So there are quite a lot of these methods that fall into the area of Confidential Computing. Most of them lie in the field of mathematics and cryptography. But in the last couple of years, this topic has been popularized very much by the fact that hardware developers (Intel, IBM, NVidia) have implemented data protection at the hardware level. That is, by uploading data to one server, you get a guarantee that without your permission no one will get access to your data, even having physical access to the server, which previously could not be guaranteed by any methods, since any software protection in general has always been leveled by the threat of compromise physical access to the storage system. At the same time, in order to share data, on such a server (where data from different owners are loaded) you still need to use either FL or CC or some other methods of joint data processing. But using them within a single server makes the calculation process much simpler and faster. By the way, little attention is paid to this, but I must note that there is a very subtle point of finding the right compromise: all these methods (both FL and CC) are applicable both for a distributed structure (the data is on different servers, each of which belongs to a separate owner), so for centralized (data lies on one server that supports hardware protection functions). 
But only if: distributed computing – we have all the problems of distributed computing: unreliable transmission medium, data source compromise, slow transmission speed, asynchronous operation of nodes, etc. PR from hardware vendors has led to the term Confidential Computing becoming synonymous with hardware data protection techniques. Here, for example, the TAdviser website gives the following definition: “Confidential Computing is designed to protect the data in use using a hardware trusted execution environment (Trusted Execution Environments, TEE). This is a rapidly growing data security sector, which has recently received increased attention .” But it is not so. Hardware protection is a prerequisite for using SS methods, but in itself it is not a data processing method, but only a guarantor of their protection. In any case, in a distributed environment or in a centralized one, algorithms for processing them must be applied to the data, which belong either to the CC class or to the FL. The key takeaway is that the hardware protection method advertised by the vendors is an enabler for applying secure collaborative computing methods, not the method itself. Now let’s now turn to the very mathematical essence of methods of joint confidential computing. We go to our favorite Wikipedia and find such a correct and clear definition: “In cryptography, a confidential computation protocol (also secure, secure or secret multi-party computation, eng. secure multi-party computation) is a cryptographic protocol that allows several participants to perform a calculation that depends on the secret input data of each of them, in such a way that no participant was unable to obtain any information about the other’s secret inputs.” This definition is very correct. But further on the page there is a rather complicated mathematical explanation, and if you turn to scientific articles in which cryptography of the entire process is also used, then it becomes completely sad. 
But now, attention, an enchanting example that reveals the essence of these methods right on the fingers. In fact, I spied him in the report of Peter Emelyanov, from the company ubic.techwhich was made as part of our “Federated Learning” section at the conference OpenTal.AI 2022. there are 3 friends, each of whom owns some amount of money. We want to calculate the total amount of money that 3 comrades have, but on the condition that each of the comrades does not know how much the other participants own. Step 1: each friend decomposes his sum into 3 (the total number of participants in the calculation) numbers in an arbitrary way, while maintaining the condition that the sum of these numbers must be equal to the amount of money the friend has, otherwise the numbers can be arbitrary. Step 2: each participant keeps one number, and exchanges other numbers with each other participant. Step 3: each participant sums up the numbers received from other participants with his own number and receives the sum Step 4: each participant publishes the resulting amount to all other participants Step 5: each participant calculates the average of the received amounts and receives the total average of all money from the calculation participants Outcome: each participant found out the average amount of money in the group, but did not reveal to any other participant the amount they own. The whole process is clearly shown in the figure: That, in fact, is all. In my opinion, this scheme reveals the essence of the methods of “joint confidential computing” as simply and clearly as possible. Hope it was helpful 🙂
OPCFW_CODE
The sky's the limit Ways to draw the elements of the sky or distant objects that looks stationary even as the player move. They can be combined so one can have a skybox for stars and skyscene for the ships that fly around a space station the player walk around in. When the player can jump in a plane and take to the sky the clouds can no longer be part of a skydome or box. - Realistic Cloud Rendering Using Pixel Synchronization, 2014 - A Simple, Efficient Method for Realistic Animation of Clouds, 2012 - Let There be Clouds! Fast, Realistic Cloud-Rendering in Microsoft Flight Simulator 2004: A Century of Flight - 2004 - Real-Time Cloud Rendering, 2001 - Dynamic Volumetric Cloud Rendering for Games on Multi-Core Platforms, 2000 A sky scene is similar to a skybox but a whole scene is used a rendered centered on the player. It can be used to show distant movable things such as rotating planets in the distance or spaceships that fly by outside a space station. A skybox is a box centered on the camera where each side has a texture of that represent the view of the distant world in that direction. So the upside show the sky and the side ones show the distant mountains. The down one it not normally seen in a game with terrain but the skybox but it can be seen in a space simulator. More then one skybox can be used if some of them are transparent. For example a distant ones with stars and another one with the moon. The moon skybox can then slowly rotate to make the moon move. Skyboxes are often used to show content that do not animate. - Skybox DirectX Put a half sphere (dome) centered above the camera. Use a texture on it, modify the vertex colors or a combination of both. By changing the vertex colors it is possible to model dusk and dawn. 
- Journal Entry 26 – Skydome - 2013 - Drawing a simple skydome - 2011 - A fast, simple method to render sky color using gradient maps, 2006 - Real Time Rendering of Atmospheric Scattering Effects for Flight Simulators, 2003 - Sky Domes, 1999 - Tutorial 11: Bitmap Clouds This is a plane that is always above the camera. It can have some textures on it of clouds and it can also be animated. Set the background color to blue and you are done :). Dark and Stormy - 2018 Dark and Stormy - 2018 Stormscapes: Simulating Cloud Dynamics in the Now
OPCFW_CODE
|Q. What are the essential elements of a CICS Map?| |Organic Chemistry involves the study of complex Carbon-compounds, that are made up of simple elements like Carbon, Hydrogen, Nitrogen, Oxygen and combine to form bonds. CICS Maps are like Organic compounds, they consist of several elements, which form bonds and result in a nice, user-friendly interface. Every CICS Map consists of elementary Field(s). A field is any data-item on the CICS Map. Fields meant for Input, where the user can type some data, are called Data-Entry Fields. Fields that are used to display Titles, Labels or Captions, Alert Messages are Display-Only Fields. | |Q. What are Attribute Bytes?| |Every Field on the CICS Screen has Characteristics or Attributes. Attributes control the look-and-feel as well as the behavior of the field. This attribute information is stored in one Extra Byte. As you may see in the below diagram, each field is preceded by an Attribute-Byte. In fact, a field doesn't end, unless the next Attribute-Byte comes in. Take a look at the picture below. | One field ends, when next field's Attribute Bytes comes in. What happens in the case of the Data-Entry Field? Had the Data-Entry Field been followed successively by another field, it's attribute byte would have ended the Data-Entry field. But, there’s no field that succeeds the Data-Entry Field on the same line. So, the Data-Entry field would extend all the way upto the next line, where the Attribute-Byte of the Caption Field 'Name and Address . . . :' is encountered. To avoid this, you would have explicitly code a Closing Attribute Byte after the Data-Entry Field. |Q. How to code the Attribute Byte?| |There are three important parameters you can control, using the Attribute Byte – Intensity, Protection and Shift. | The Intensity relates to the appearance of a field. NORM stands for Normal Intensity, BRT for Bright Intensity. DRK Dark helps to hide characters, by darkening them, like when you type Passwords. 
The Protection allows/dis-allows the user-entry of data into a Field. PROT stands for protected, and implies that this field is a Display-Only Field. UNPROT stands for Un-protected and implies that this field allows data-entry. A Skip-Field defined by ASKIP is a special type of a protected field, where automatically skips the cursor to the next un-protected field. The Shift Attribute determines whether the keyboard is in Alphanumeric Shift or Numeric Shift. If you code NUM, it turns the Numeric Lock ON, so the user can only enter Numeric Digits 0-9. The Attribute-Byte contains 8-Bits of Data. The first seven bits store the Intensity, Protection and Shift Information. The Last bit of the Attribute-Byte is called Modified Data Tag(MDT). If the MDT contains Bit 0, it means data in this field has not changed. If the MDT Bit contains 1, it means data was modified. |Q. What are the important BMS Macros that you code to create a new Map?| |The important BMS Macros that you must learn to code, to design and create a complete map are - | |Q. How to code the DFHMSD Macro, to define a new Mapset?| |A Mapset is a collection of Maps. To define a New Mapset, you must code the DFHMSD Macro. The Syntax of the DFHMSD Macro is shown below - | First of all, you should assign a unique-name to your Mapset. I have chosen the name 'INQSET1' for my Mapset, which contains the Customer Inquiry Map. The TYPE Parameter what happens when this BMS Map Source Code, is Assembled. A TYPE=&SYSPARM means both a Physical Map and Symbolic Map will be generated. TYPE=DSECT generates only a Symbolic Map. TYPE=MAP generates only a Physical Map. The LANG Parameter indicates the Language, in which the Symbolic Map Variables are generated. Since, later you are gonna write a Program in COBOL, to process the User Inputs from this Map, I have coded LANG=COBOL. The MODE Parameter is usually coded as INOUT, so that the Symbolic Map will include both Input(I) and Output(O) COBOL Variables. 
The TERM Parameter is the type of terminal. For the Customer Inquiry Screen, I have coded TERM=ALL. The CTRL Parameter is used to specify the Control Options used by all the Maps in the Mapset. You may also specify the Control Options separately for each Map(Screen) in the Mapset, by coding the CTRL Parameter on each DFHMDI Macro. Control Options used frequently are FREEKB. When a Map is displayed on the Screen, by default the Keyboard is in a Locked State, and you'd have to press RESET Key, to type any inputs. To avoid this Keyboard Lock, you'd code a CTRL=FREEKB. ALARM is another Control Option. It causes an Beep Sound to be played when the Map is displayed on the screen. The DSATTS(Symbolic Map) and MAPATTS(Physical Map) parameters are used to specify which Extended Attributes will be supported. Extended Attributes include Color of the Field, whether its highlighted or not. The TIOAPFX Parameter causes a 12-bytes Filler Item to be included at the beginning of the Symbolic Map. As a thumb-rule, you must always specify TIOAPFX=YES for COBOL Maps. |Q. How to code the DFHMDI Macro, to define a New Map?| |To define a new Map within a Mapset, you must code the DFHMDI Macro. The general syntax of the DFHMDI Macro is shown below - | First, assign a unique name to you map. I have assigned the Name 'INQMAP1' to the Customer Inquiry Map. The SIZE specifies the size of the Map, in terms of number of rows and Columns. The standard size of most maps are 24 Rows x 80 Columns. The LINE parameter specifies the Row-Number where the Map actually starts. I have coded LINE=01 for my map. The COLUMN parameter specifies the Column-Number where the Map starts. I have coded COLUMN=01 for my map. |Q. How to code the DFHMDF Macro, to define a new field on the Map?| |The DFHMDF Macro is used to define the a new field on the Map. You must code one DFHMDF Macro for each field on the Map. What really happens is, when you code a DFHMDF Macro, it creates and Attribute Byte. 
As I've said before, for the Data-Entry Field, you'll have to explicitly code a closing attribute-byte, so for the Data-Entry field, I am going to code two DFHMDF Macros. | The general syntax of the DFHMDF Macro is shown below - First, code a label or Field-Name, if you'd like to process this field in the COBOL Program. For example, whatever the user enters in the '______' field, needs to be processed by the COBOL Program, so I have named this field CUSTNO. The POS Parameter specifies the co-ordinates in terms of row-no. and col-no. of the Field. For example, if you want to define the field 'Customer Number. . . . .' on the Row No. 5 and Column 1, you should code The LENGTH Parameter specifies the length of the Data in Field. For example for the field 'CUSTINQ1', the data contains 8 characters, so I should code LENGTH=08. This will actually occupy 9 Screen Positions = 8 Data Characters + 1 Attribute Byte. The next ATTRB Parameter is used to specify the Attributes of the Field. Some other options in the ATTRB Parameter include IC(Initial Cursor). IC Option in the ATTRB Parameter, indicates that the cursor should be placed at the start of this field, when the map is displayed on the screen. The FSET Option of the ATTRB Parameter causes the MDT to be turned on. The COLOR Parameter lets you specify a Color for your field. The PICIN and PICOUT Parameters lets you specify PICTURE Clauses for the Input and Output Cobol variables in the Symbolic Map. Here is the complete BMS Macro Source Code for the Customer Inquiry Screen, with all the DFHMSD, DFHMDI and DFHMDF Macros. |Q. How to assemble CICS Maps? What are Physical and Symbolic Maps?| |The CICS Map Source Code containing the BMS Macros must be Assembled and Link-Edited into a Load Module. A CICS Map is also a Program. To run Programs, you must compile the Source Program into a Load Module. 
Like-wise, to run a CICS Map and see what it looks like on the Terminal Screen, you must Assemble the Map Source Code into a Load Module. This Load Module is called Physical Map. | On the other hand, a Symbolic Map or DSECT is obtained when you assemble the CICS Map Source Code with DSECT Option. A Symbolic Map contains a List of COBOL Variables corresponding to the different fields on the screen. I have written a Assemble-Link Job to run the assemble CICS Map Source Code into Physical Map and Symbolic Map respectively. You should change the parameter values in the JCL Stream, to suit your needs and then run the Job. |Q. How do you define and Install the Map in CICS?| |To display and see the output of your Map on the Terminal Emulator, CICS Software needs to pick up your Physical Map(Load Module) from the Disk and load it into the Main Storage. To help CICS Software find your Load Module, you must add the appropriate entries in the CICS Tables. | First, use the CEDA Transaction define a new mapset. Enter the Mapset Name INQSET1, with Group-name MYGROUP. Second, use the CEDA Transaction to install the mapset. Enter the Mapset name as INQSET1. Enter the Group-name as MYGROUP. In a similar fashion, you need to DEFINE and INSTALL your Program INQSET1. Remember, that ultimately a MAP is also a Program. So, you need to CEDA DEFINE PROGRAM and CEDA INSTALL PROGRAM. |Q. How to display the Map on the CICS Terminal?| |Whenever you want to send anything to the CICS Terminal, you can use the special CICS Command SEND MAP. CICS provides a set of commands to you, called the Terminal Control Commands. When you want to send data to any terminal, you can use the CICS Command SEND MAP. On the same lines, when you want to receive data from the terminal, you can use the CICS Command RECEIVE MAP. | We want display the Map INQMAP1, within the Mapset INQSET1. Type the command CECI SEND MAP('INQMAP1') MAPSET('INQSET1') and hit Enter. 
You should get a warning message, "About to Execute Command". Hit Enter once more. After you press Enter, you should be able to see the Output of your Map on the CICS Terminal. "What on earth is a Mainframe?" is an absolute beginner's guide to mainframe computers. We'll introduce you to the hardware and peripherals. We'll talk about the operating system, the software installed on a mainframe. We'll also talk about the different people who work on a mainframe. In a nutshell, we'll de-mystify the mainframe.
OPCFW_CODE
import os
import subprocess as sp


def mute_scale_crop_movies(movies_original_path, movies_processed_path, image_size):
    """Mute, crop, and scale every .mov movie in a directory using ffmpeg.

    Each movie is cropped to a 240x240 square (offset 40px from the left
    edge), scaled to image_size x image_size, stripped of its audio track
    ('-an'), and written under movies_processed_path with the same filename.

    Parameters:
        movies_original_path: directory containing the source .mov files.
        movies_processed_path: output directory (created if missing).
        image_size: edge length in pixels of the square output frames.

    Returns None. If the source directory does not exist, a notice is
    printed and nothing is processed.
    """
    if not os.path.isdir(movies_original_path):
        print('To preprocess the movies yourself, you need the original movie files.')
        return
    # exist_ok avoids the separate isdir check and the race between it
    # and makedirs.
    os.makedirs(movies_processed_path, exist_ok=True)
    for filename in os.listdir(movies_original_path):
        # Case-insensitive match so files named *.MOV (common from
        # cameras/macOS) are not silently skipped.
        if not filename.lower().endswith('.mov'):
            continue
        command = [
            # Plain 'ffmpeg' is portable: POSIX finds the binary as-is and
            # Windows process creation appends '.exe' when searching PATH,
            # so the previous hard-coded 'ffmpeg.exe' is unnecessary.
            'ffmpeg',
            '-y',  # overwrite existing output files without prompting
            '-i', os.path.join(movies_original_path, filename),
            '-vf', 'crop=240:240:40:0, scale=' + str(image_size) + ':' + str(image_size),
            '-an',  # drop the audio track (mute)
            os.path.join(movies_processed_path, filename),
        ]
        sp.call(command)


# Script entry: preprocess the six "shooting-N" movie sets that live next to
# this file, writing image_size x image_size versions beside the 320x240
# originals. Missing input directories are reported and skipped by the
# function itself.
script_directory = os.path.dirname(os.path.abspath(__file__))
image_size = 64
for i in range(6):
    mute_scale_crop_movies(
        os.path.join(script_directory, 'movies', '320x240', 'shooting-' + str(i + 1)),
        os.path.join(script_directory, 'movies', str(image_size) + 'x' + str(image_size), 'shooting-' + str(i + 1)),
        image_size)
STACK_EDU
question: support for Electron env vars Hi. I have noticed different behaviour when running electron locally, and when running a packaged electron app. Specifically, env vars passed to the electron process don't seem to work in a packaged app. For example, if I build for darwin, then Electron executable in MacOS does not behave as a locally installed electron executable. >ELECTRON_RUN_AS_NODE=true electron -v will correctly outputs the node version. But this does not work when you have build an app using electronPackager: >ELECTRON_RUN_AS_NODE=true dist/darwin/MyApp-darwin-x64/MyApp.app/Contents/MacOS/Electron -v which will run the default app. Does electron-packager support env vars in this way? Edited: Some background: running the app MyApp.app works fine, but I also want to get access to the node executable from within my app from a script. I believe it runs the default app because you need to specify the app directory in the second command. @malept yep - makes sense. However, I'm trying to get to the underlying node executable directly. I guess my question could also be phrased as "Where is the node executable in a packaged Electron app?" :-) Good question. It's actually more obvious in builds that are not targeted for OS X. When you target for Windows, there is a node.dll file in the package. When you target for Linux, there is a libnode.so in the package. So the answer is, Electron is not packaged with a node executable. It is prebuilt, linked to a node library (I guess, statically, in the case of OS X). To your other question (how can I get the node version from a script), my guess is that you'll have to provide that functionality yourself via your app (and argument parsing). @malept thanks for your help on this. So on OSX, the node lib is MyApp.app/Contents/Frameworks/Electron Framework.framework/Libraries/libnode.dylib - equivalent to the node.dll on Win32 and libnode.so on Linux. 
+1 EG: Electron supports passing flags to the JS-engine like: $ electron --js-flags="--harmony_proxies --harmony_collections" your-app Is this supported? So on OSX, the node lib is MyApp.app/Contents/Frameworks/Electron Framework.framework/Libraries/libnode.dylib Ah, OK. I thought that was weird that only OS X was linked statically. All I did was a quick find on my app's out directory. I wonder if this might be a more relevant question for https://github.com/mafintosh/electron-prebuilt. Best place to ask it is actually in this repo http://github.com/atom/electron, they are the ones who actually create the .app in the first place, we just unzip it from the zip they upload in their releases and modify it thanks @maxogden - I'll raise there. Actually @maxogden, the following works fine: >npm i electron-prebuilt >ELECTRON_RUN_AS_NODE=1 node_modules/electron-prebuilt/dist/Electron.app/Contents/MacOS/Electron -v So if the actual .app is fine, then it must be something that happens after download. I've tried performing the stuff my build does such as removal of default_app etc, but Electron still works as expected. The only time that the env vars are ignored, is when running against the .app after running electron-packager. Just a quick update: I currently believe that the issue of not being able to pass env vars is introduced in electron-packager. Prebuilt electron works fine: >npm i electron-prebuilt >ELECTRON_RUN_AS_NODE=1 node_modules/electron-prebuilt/dist/Electron.app/Contents/MacOS/Electron -v >v5.1.1 Electron that has been through electron-packager: >ELECTRON_RUN_AS_NODE=1 node_modules/electron-prebuilt/dist/Electron.app/Contents/MacOS/Electron -v >v0.33.9 The electron-packager version seems to be ignoring the ELECTRON_RUN_AS_NODE flag. A call to electron with the flag set and no params should start the node interpreter, but instead, it launches the default app. 
I think we can rule out electron-download as being a culprit, as it's used successfully in electron-prebuilt.
GITHUB_ARCHIVE
You can Register your Microsoft Visual Studio 2010 Express for Windows Phone 7 here, if you enjoyed this post, please consider leaving a comment or subscribing pianist hd full version to the RSS feed to have future articles delivered toRead more Player de vídeo gratuito para DVD, DivX, MP4, MKV, FLV, Xvid, arquivos ISO e outros.View Similar Sites m Similar Sites by Topic see top sites for the most relevant topics from m: 'http' see top sites for this topic 'converter'Read more Multiple.cab files with.inf and x86 binary bundled in single.cab king of fighters xiii full pc game The second strategy requires that one.cab file contain both the Windows 95 or later.inf file and the x86 binary. In this scheme, when a given website attempts to install an ActiveX control, the Installer Service will first validate that the website URL is on the approved installation listing or part of the Trusted sites zone.This option causes any required directories to be created, if necessary.Step 1, launch Internet Explorer by double clicking the program icon on your computers desktop.The end user will be prompted to manually download the zip package when they access the Web application with insufficient user privileges or incorrect settings in Internet Explorer.Exe Setting the compression type ( -m ) Set the compression type with the -m option.Dll FileVersion4,20,0,6164 hookmfc42installer mfc42.dll FileVersion4,2,0,6256 hookmfc42installer olepro32.dll jquery photo gallery plugin FileVersion4,2,0,6068 hookmfc42installer mfc42installer file-win32-x86b runextract_DIRmfc42.exe If you're bundling the.inf file and your control's x86 binary in a single.cab file, and if your control was written with Microsoft Foundation Classes (MFC).2, you can use the previous example after.Exe utility, which is another one of the tools you download with Authenticode technology, to create a key pair (the pair consists of a public and a private key). 
Step 2, select the Tools menu followed by clicking the Internet Options sub-menu. Step 6, repeat step 5 for all Internet zones that you want to enable ActiveX controls. Exe replaces the g and g files that were shipped as part of the ActiveX SDK.Close, platforms, popular links, categories, hELP settings, enter.Txt Options (-p, -P, english to spanish dictionary full version for pc -r, -s, -i, -m, -l, Path preservation ( -p ) Directory names are not preserved in the cabinet (by default only the file name component is stored.Avast Free Mac Security, aVG AntiVirus for Mac, virtualDJ.Ocx (the file that contains the sample control) to a subsection of the same name.This article introduces a data-compression technology and associated tool set that you can use to package your Microsoft ActiveX control for faster, more efficient downloading over the Internet or an intranet.Files are added in the order in which they are parsed on the command line.Exe The -s option does not actually write the code signature; it reserves space for it in the cabinet.Without this option, only the file names would be stored.Then, right-click the ActiveX installation policy for sites in Trusted Zone menu option located in the details pane.
OPCFW_CODE
Note: multiremote has been replaced by Multiremote.NET! I suggest grabbing that instead, since it's superior to multiremote in pretty much every way. However, this page will be here for a for a few weeks or so. Current version: 3.0 beta 4.01 Download binary here (most of you want this one) (1476 downloads since 2005-02-15) Download source code here (802 downloads since 2006-06-28) |Multiple application support ||Support for Apple iTunes, Winamp, Foobar 2000* and QCD Player**. * Seeking appears to be broken in Foobar 2000. ** Retrieving the current title is broken for QCD Player. ||Use hotkeys to control the applications and copy info about the current track to the clipboard, which enables to you paste (ctrl+v) it practically in any textbox, even in some games. ||You can specify your own formats, i.e. how multiremote handles the data it gets from winamp. See the readme for more info and a tutorial. |Tray balloon tips ||You can use multiremote to display the currently playing song in a balloon tip (screenshot), very useful if you've got your played minimized to tray like I do. Obviously, you can turn these off. ||Choosing your own hotkeys, allowing them to automatically start the player if necessary, automatically pressing ctrl+v/enter/ctrl+enter on copy to clipboard, and more. | 3.0 beta 4.01 |* Fixed: Song title is no longer displayed when pausing/unpausing in Winamp | 3.0 Beta 4 |* Added support for balloon tips * Fixed a few bugs that would crash multiremote when using it together with iTunes * Removed logging/output features due to memory leaks | 3.0 Beta 3 |* Fixed Autostart checkbox bug. * Made Winamp the default player (instead of not having any default player at all). * Added: Toggle shuffle / toggle repeat hotkeys for iTunes/Winamp/QCD Player. | 3.0 Beta 2 |* Changed log timer from every 10 seconds to every 30 seconds to save resources. * Fixed memory leak that occured when retrieving info from iTunes. 
* Fixed the "Autostart at windows startup" option * Fixed some other minor issues. * Added commandline argument support. You can only use one at a time, and the supported arguments are: * -prev, -play, -pause, -stop, -next, -volup, -voldown (self-explaining? if not, try reading again!) * -b (skip backwards) and -f (skip forwards). That's all, folks. I'm quite aware of the annoying memory leak when using the logging functions but I have no idea what's causing it, unfortunately. | 3.0 Beta 1 Copyright © 2005 by serenity (at exscape dot org) 7.302ms was spent creating this page
OPCFW_CODE
package org.firstinspires.ftc.teamcode.auto.vision;

import com.qualcomm.robotcore.eventloop.opmode.LinearOpMode;
import org.firstinspires.ftc.teamcode.math.Pose;
import org.firstinspires.ftc.teamcode.teleop.utility.Configuration;

/**
 * Contract for a vision pipeline: implementations start and stop looking
 * for a given {@link TargetType}. The nested enums model the detectable
 * Skystone positions and the available cameras.
 */
public interface VisionSystem {

    /**
     * Detected Skystone position. Each constant carries the configuration
     * key it maps to; note NONE reuses the RIGHT key ("stone r").
     */
    enum SkystonePosition {
        LEFT("stone l"),
        CENTER("stone c"),
        RIGHT("stone r"),
        NONE("stone r");

        public final String key;

        SkystonePosition(String key) {
            this.key = key;
        }
    }

    /** Camera sources, each paired with its hardware-map device name. */
    enum CameraType {
        PHONE("Phone"),
        FRONT_WEBCAM("Webcam 1"),
        BACK_WEBCAM("Webcam 2");

        final String name;

        CameraType(String name) {
            this.name = name;
        }

        /**
         * Parses a configuration string into a camera type, ignoring case.
         * Unrecognized values fall back to FRONT_WEBCAM.
         */
        public static CameraType stringToType(String raw) {
            String normalized = raw.toUpperCase();
            if (normalized.equals("PHONE")) {
                return PHONE;
            }
            if (normalized.equals("WEBCAM 2") || normalized.equals("BACK WEBCAM")) {
                return BACK_WEBCAM;
            }
            // "WEBCAM 1", "FRONT WEBCAM", and anything unrecognized.
            return FRONT_WEBCAM;
        }
    }

    /** Kinds of things the vision system can be asked to look for. */
    enum TargetType {
        SKYSTONE,
        BRIDGE,
        PERIMETER,
        ALL;

        /**
         * Parses a configuration string into a target type, ignoring case.
         * Unknown values map to ALL.
         */
        public static TargetType stringToType(String raw) {
            String normalized = raw.toUpperCase();
            if (normalized.equals("SKYSTONE")) {
                return SKYSTONE;
            }
            if (normalized.equals("BRIDGE")) {
                return BRIDGE;
            }
            if (normalized.equals("PERIMETER")) {
                return PERIMETER;
            }
            return ALL;
        }
    }

    // Begin looking for the given target type.
    void startLook(TargetType targetType);

    // Stop the vision pipeline.
    void stopLook();
}
STACK_EDU
So this is my script so basically like, erer is only true if v, which is a viewportframe that is suppose to have a number as the name, doesnt match any ge, which are boolvalues with number names inside game.Players.LocalPlayer.Floors:GetChildren(). So basically like, it is suppose to make it so that it only adds viewportframes inside the scrolling frame if there isnt the same one in the scrolling frame, but my script doesnt work in the matter that it ignores if a viewportframe is already in a scrolling frame and keeps adding it like its not there Heres a fun little animation I made to represent it. What I get: Ples someone help me pls, I don’t understand your question I said its not an array its a TABLE. table.find only works on arrays Like I have an idea that is to do another for i,v in pairs in the thing and add the contents into a dagtjndbkijnasjreeeeeeeeeee so like what im tryna do is like to make it so that it only inserts the viewportframe if there isnt any viewportframes that have the same name of the viewportframe thats supposed to be teh viewportframes name its rlly hard to explain and i dot have the time to do it rn cos i gotta go in like 30 minutes ok so what do i finhd with table.find cos its an array of objects and i need to specifically find the name the name of the object that is Can someone help me pleas i ony have 20 minutes left please sonenone help it isnt this hard Your code is very difficult to follow with your variable names. Choosing better variable names would make this code much cleaner. Your problem is that you are checking v.Name ~= ge.Name, when it should be You should then later check that ere == false. Here is some better code that uses good variable naming conventions to make your code easier to read. 
local player = game:GetService("Players").LocalPlayer local floors = player.Floors while wait(5) do for _, floor in ipairs(floors:GetChildren()) do local hasViewportFrame = false for _, frame in ipairs(script.Parent.ImageLabel.ScrollingFrame:GetChildren()) do if frame:IsA("ViewportFrame") and frame.Name == floor.Name then hasViewportFrame = true end end if not hasViewportFrame then -- make your viewport end end end A big concern I have with your current code is it’s on a loop every 5 seconds. Why is this? If you want to listen to floors being created/destroyed, you should use the appropriate API for that (ChildAdded/ChildRemoved) You could use a generic for and iterate through the array, and use rawequal to check if they’re the same. For example local IsInArray = false for _, Object in ipairs(Objects) do if rawequal(Object, Compare) then IsInArray = true break end end I hope this helped. That would be what you’d compare Object to. I probably could’ve come up with a better name lol. This is what I did and now it doesnt insert the viewportframe in the scrollingframes at all? while wait(5) do for i,v in pairs(game.Players.LocalPlayer.Floors:GetChildren()) do local erer = false for er,ge in pairs(script.Parent.ImageLabel.ScrollingFrame:GetChildren()) do if ge:IsA("ViewportFrame") then if v.Name==ge.Name then erer = true end end end if erer == false then What is “compare”?,
OPCFW_CODE
Where can fluorite be found on Long Island, New York? I'm interested in fluorite (Since Fluorine is my favorite element, and I wanted a sample of it, even if bonded, but I don't want to buy it; that would be no fun), and I'm curious if it can be found on Long Island, New York. Where would the largest geologic formations be that are accessible? If fluorine is your favourite element, you try to look for pegmatites. There you can usually find apatite, micas (biotite or muscovite) or even topaz that have fluorine in them. Fluorite is probably the most obvious mineral to look for when you're hunting for fluorine, but not the only one. Good luck! And you can look here: https://www.mindat.org/show.php?id=1576&ld=1#themap scroll down, there's a list of localities for fluorite and an interactive map. You might find something nearby. Long Island is entirely made up of glacial debris. It's hard enough finding glacial erratics, much less fluorite! I do not know of fluorite being reported to occur on Long Island. If fluorite is found in NYC, the Natural History museum might have specimens. You might try contacting a mineralogy / mineral collecting club on Long Island. Someone there would probably know for certain if fluorite can be found. Fluorite can be found in several limestone formations in New York. See map Personally, I have collected fluorite from the Penfield Quarry near Webster New York during the mid 1980s. Some of the waste rock from the quarry was used as breaker wall stone on the lake front in Webster New York. You could probably still collect fluorite crystals there. Here is a map of the lower 48 states showing mines where fluorite has been reported to occur. Other places I have personally collected fluorite Terlingua, Texas fluorspar mine (massive bedded fluorite) Spearfish Canyon Black Hills South Dakota (fluorite in geodes in limestone) Closed rock quarry in Pennsylvania near Breezewood, PA Locations where fluorite is still able to be collected. 
Mines/Mountains near Socorro New Mexico Mt Antero in Colorado, at high elevation. Bancroft Ontario - fluorite in calcite (fluorspar) Burin Peninsula, Newfoundland, Canada (fluorspar) References: Wikipedia Fluorite Maps created using Digital Rockhounds Companion software. Long Island is one giant moraine so finding anything specific is basically a needle in a haystack without a really detailed survey and even then it is mostly luck. This book will help you get the lay of the local geology. The NY Mineralogical Club might be a nice place to visit in general; they have some incredibly detailed surveys of the region. Mindat has a searchable mineral locality database, which pops up a few NY road cuts for fluorite as well as a couple of fluorite variants. Low percentage, but that's half the fun, right? PS: you are fairly close to the Franklin Mineral and Sterling Hill Mining Museum where you can find some really rare fluorescent minerals in the old talus piles, including fluorescent fluorites, and you can search them for a nominal fee.
STACK_EXCHANGE
Lynda.com is no longer available for individual subscribers. Please go to LinkedIn Learning to access your account if you moved or to sign up for a new account. For additional support, reach out to email@example.com. Programming Foundations: Software Testing/QA with Meaghan Lewis Learn how to incorporate different kinds of software testing into your development cycle to help your team meet quality goals with every release. 53m 51s • COURSE Test Automation Foundations with Meaghan Lewis Get started in test automation. Learn how to integrate automated testing into your QA or DevOps workflow. 1h 9m • COURSE API Testing Foundations with Dave Westerveld Learn the basics of API testing. Discover how to use several robust tools for testing APIs at scale in an organization. 1h 44m • COURSE Behavior-Driven Development with Robin Beck Learn the basics of behavior-driven development (BDD). Discover how BDD can improve collaboration, refine requirements, and identify defects earlier in the software testing cycle. 1h 25m • COURSE Agile Testing with Ash Coleman Create higher-quality software faster, by implementing agile testing in your organization. Learn about the role of software testers and how testing can enhance your workflow. 49m 10s • COURSE Learning Selenium with Meaghan Lewis Learn how to use Selenium to test web applications. Find out how to write, automate, and run tests with the Selenium WebDriver API and Selenium Grid. 1h 15m • COURSE Selenium Essential Training with Meaghan Lewis Learn more about Selenium, including how to improve your use of WebDriver, advanced locators, component interactions, synchronizations, and more. 1h 59m • COURSE JMeter: Performance and Load Testing with Michael Smith Learn how to use Apache JMeter, a popular open-source performance testing tool, to create and execute load tests that help you highlight performance issues in your software. 32m 53s • COURSE Scripting for Testers with Dave Westerveld Automate software testing with scripting. 
Learn how to write simple Python scripts to script API calls, reporting, test site prep, and more. 1h 50m • COURSE Java: Testing with JUnit with Peggy Fisher Make your Java code error free. Learn how to integrate JUnit with popular tools and IDEs (Eclipse, NetBeans, IntelliJ, and Maven), and conduct unit testing for Java applications. 1h 10m • COURSE API Test Automation with SoapUI with Dave Westerveld Learn how to leverage SoapUI to fully automate testing of APIs. 1h 52m • COURSE Insights on Software Quality Engineering with Aaron Dolberg Join your instructor, a software quality engineer, to learn how to get started in quality assurance (QA), how QA fits in at companies of any size, and how agile has changed QA. 11m 43s • COURSE You'll learn software testing skills with these experts. Meaghan Lewis is a technical program manager at Microsoft. Meaghan currently focuses on creating and delivering learning experiences centered around emerging technology. Meaghan has a superpower in quality engineering and has worked in the QA field for nearly a decade, working at GitHub, startups, and at a tech consultancy. Meaghan has a passion for teaching and has spent years speaking at conferences and delivering online learning content. Meaghan is a California native and currently lives in sunny Oakland, California with her husband and two dogs. On a typical day you will most likely find her outside getting some fresh air by walking her dogs or gardening. She also enjoys Pilates, cooking spicy food, and watching scary movies. Dave Westerveld is test developer with many years of testing experience. Dave has been involved in the testing of many different projects, ranging from well-established products to helping out with the early stages of major new initiatives. He excels at solving automation problems in a team environment and has been involved in traditional automation initiatives at various stages. 
Dave has also helped to improve product quality through the creative use of automation tools and by helping build out automation frameworks. He has a desire to see teams efficiently producing high value software and is enthusiastic about understanding the ways that automation tools can be used to help with this goal. Currently, Dave works as a test developer at D2L, the world's first integrated learning platform. He is also an instructor, excited to be sharing his knowledge of testing at LinkedIn Learning. Robin Beck is a lead technical trainer who helps developers overcome infrastructure challenges. Ash Coleman is an engineering manager and consultant with years of digital experience. Ash's former career as a professional chef helped her establish a determination to understand and comply with user satisfaction, as well as build a career in using technology as a means to satisfy user demands. Her continual desire to mediate between business and digital fronts has been well served by the culmination of her experiences. Ash currently works as a quality engineering manager at Credit Karma. In addition, she's a founder and software QA and test consultant at QualityInclusive, an organization that focuses on business development by way of tailored software tests and processes. She also serves as a test manager and consultant on client projects. In his free time, Mike is a software testing evangelist, public speaker, technology blogger, and fiction author. Having held various unique and interesting jobs, Mike found his lifelong passion for technology came in handy when he first found himself in a software testing role in 2011. Since then, he's achieved certifications in ISTQB® Foundation, Advanced, Manager, and Agile Tester. He advanced to principal software tester in 2018, and began to create tooling and environments for other testers to use with the skills in Docker and AWS that he learned through his love of new and emerging tech. 
Moving into research and development, Mike now studies ethics in technology, machine learning, deep learning, and data visualization, with hopes to use his skills for the greater good. Peggy Fisher is a programmer and content manager at LinkedIn Learning. Peggy's main focus is application programming in Java, Arduino, and C++. She has also worked on courses in COBOL and discrete mathematics. Previously she was a faculty member at Penn State University's College of Information Sciences and Technology. She started out as a programmer working for a large insurance company, but after 18 years she left her job as a director of information technology to pursue her true passion: teaching. She earned a master's degree in math education, and went on to teach high school math and computer science in Pennsylvania. In 2012, Peggy accepted a position as an instructional designer at Penn State, and shortly thereafter began teaching Intro to Application Programming with Java. She is a strong supporter of women in STEM. As one of the few female programming teachers, she serves as a mentor to incoming female freshmen who are considering a career in programming. She was also the K–12 outreach coordinator for the college, where she scheduled, ran, and taught summer camps for middle school and high school students. In a PBS NewsHour interview, she expressed that all students should take at least one programming class either in high school or college. Peggy enjoys constantly learning and finding new and exciting ways to bring technology to life in and outside of the classroom, such as using Arduino microcontrollers or Lego Mindstorms, to help make learning hands-on and fun. Aaron Dolberg is the QA engineer behind software such as Flash Professional, Flex, and Shockwave Studio. Aaron Dolberg has been contributing to successful high-profile products in a quality capacity since 2000.
He's been a member of the engineering teams that produced Flash Professional, Flash Catalyst, Flash Player, Flex, and Director Shockwave Studio to name a few. Aaron is a passionate user advocate and has presented at conferences and user groups to maintain a strong connection with the people who use the products he devotes his time to. He currently manages teams of quality engineers in the gaming industry and works to identify efficient ways to validate functionality, track overall team progress, and mitigate risk.
OPCFW_CODE
Well I sat down, thought carefully about it, and reorganized my proposed license along the same lines that I would organize a config file. Instead of enumerating what is allowed, deny this, deny that, deny the other, allow everything else. I think that this is a good way to rewrite it. It means that your obligations are much more clearly spelled out, and there is less micromanagement of what you may and may not do. However it is a significant rewrite, and I would like to see another iteration (hopefully with some legal input) before I try drawing this up as an RFP. Cheers, Ben PS This is significantly more lines, but I reformatted. It is around the same number of words. ------------------------------------------------------------- THE ARTISTIC LICENSE VERSION 2, SEPTEMBER 2000 Preamble The intent of this document is to enable you to use, distribute, modify, and borrow from this Package on terms as generous as can be conveniently managed without detracting from the ability of the developers of this Package to retain a semblance of artistic control over future development of this Package. While this license may stand on its own, it is intended to be used in a dual-licensing scheme, and may be incompatible with other software licenses when used on its own. Terms and Conditions 1. This License applies to any work containing a notice placed by the copyright holder or holders saying it is licensed in whole or in part under the terms of this Artistic License. The "Package", below, refers to such a work (be it a program, collection of programs, etc) or any derivative under Copyright law. A "Standard Version", below, refers to any such work which is licensed in its entirety under this Artistic License. Each licensee is addressed as "you". 2. You may not redistribute or modify this Package without meeting all copyright and license obligations. If this Package is a Standard Version, then the proffered contract attached to this license constitutes an acceptable arrangement. 
If this Package is not a Standard Version, then the proffered contract is sufficient to meet the obligations arising from this license. However there may be additional obligations. 3. If this is a Standard Version then you are free to use this Package as you see fit. The scripts and library files supplied as input to, produced as output from, linked to, or linked from the programs and libraries of this Package will not automatically fall under the copyright of this Package, but belong to whomever generated them. If this is not a Standard Version then no restrictions arise from this license on use, input, output, or linking, but such restrictions may arise from other licenses that are in effect. 4. The intermediate state of the programs of this package are covered by the copyright of this package. In particular should you attempt to produce a binary image using "undump", "unexec", or an equivalent including saved internal byte code, that binary image shall be considered a derivative work of this package. 5. The names of the contributers to this package may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT UNLESS REQUIRED BY LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER OR CONTRIBUTOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
Proffered Contract for Distribution, Modification and Derivative Works Preamble This agreement is offered by the copyright holders for your convenience should you wish to modify or distribute a Standard Version or some derivative of a Standard Version. You have no obligation to accept it, however under Copyright law you will need permission to undertake the activities covered. Terms and Conditions 1. The definitions in the copyright statement apply to this contract. Furthermore "Original Version" shall refer to the Standard Version you received or the Standard Version that your Package is modified or derived from. A modification shall be called "includable in the Original Version" if it is available free of charge under terms allowing its application to any Standard Version licensed under the same terms as the Original Version (clearly this includes the Original Version) without causing conflict with distribution and change under existing licenses, or imposing additional restrictions beyond the inclusion of modest copyright notifications and acknowledgement of sources. 2. The permissions and requirements in this agreement only pertain to the copyrights and licenses arising from the Original Version. If this is not a Standard Version then statements of what is allowed shall be read as statements of what this agreement does not restrict you from doing. 3. You may apply modifications produced by the copyright holders or others so long as they are includable in the Original Version. 4. You may modify your copy of this Package as long as you insert a prominent notice in each changed file stating how and when you changed that file. Likewise you may borrow code as long as you clearly indicate in your own code where and when it was borrowed, and which Original Version it was borrowed from. 
shall refer to a publically available distribution
b) You shall rename any executables or library files that you have modified to names not used in the Original Version and indicate the change in the manual page or equivalent. You shall not attempt, or encourage others to attempt, to make these accessible under names that conflict with those in the Original Version through symlinks, shell scripts, or other techniques. c) Any other executables and library files which expose public or private interfaces derived from those used in the Original Version shall indicate the influence in their manual page or equivalent. In addition as in 8b) you shall endeavour to keep their names from conflicting with the Original Version. 9) If the Package is not a Standard Version, then distribution in source form carries the same obligation as 8a) (if that was not already required) and must retain applicable statements indicating what the modifications were. 10) Should your instructions on how to obtain an unburdened complete source distribution of a Standard Version fail at some point you must upon request promptly locate an alternate source of such a distribution, providing it yourself if necessary. Note that distributing this version may require you to provide earlier versions. It is therefore suggested (but not required) that you rely on a source that is closely connected to the actual developers of the Original Version. 11) Subject to the limits in 2) above, you are allowed to charge for distribution of the Package, support, etc. You are also allowed to distribute it aggregated with other products. However you shall not advertise any Standard Version as your product. Nor shall you use the names of the copyright holders and developers of the Original Version to endorse or promote products derived from the Original Version without specific prior written permission. 
12) Subject to the limitations laid out in 2) and 7)-11), if all necessary copyright and license statements are included then permission is granted to distribute the Package. _________________________________________________________________________ Get Your Private, Free E-mail from MSN Hotmail at http://www.hotmail.com. Share information about yourself, create your own public profile at http://profiles.msn.com.
OPCFW_CODE
//
//  BNRHypnosisViewController.swift
//  HypnoNerd
//
//  Created by David Kobilnyk on 7/7/14.
//  Copyright (c) 2014 David Kobilnyk. All rights reserved.
//

import UIKit

// Controller for the hypnosis screen. Note: the superclass must come first in
// the inheritance clause, before any adopted protocols.
class BNRHypnosisViewController: UIViewController, UITextFieldDelegate {

    override init() {
        super.init(nibName: "BNRHypnosisViewController", bundle: nil)

        // Configure this controller's tab bar entry.
        self.tabBarItem.title = "Hypnotize"
        // UIImage(named:) picks the Hypno@2x asset automatically on retina
        // devices ("imageNamed is unavailable: use object construction
        // 'UIImage(named:)'").
        self.tabBarItem.image = UIImage(named: "Hypno.png")
    }

    required init(coder aDecoder: NSCoder) {
        fatalError("NSCoding not implemented")
    }

    override func loadView() {
        // Build the view hierarchy in code: a full-screen hypnosis view with a
        // text field floating near the top.
        let screenBounds = UIScreen.mainScreen().bounds
        let hypnosisView = BNRHypnosisView(frame: screenBounds)

        let entryField = UITextField(frame: CGRectMake(40, 70, 240, 30))
        // A border makes the field easy to see against the background.
        entryField.borderStyle = .RoundedRect // Swift infers UITextBorderStyle.RoundedRect
        entryField.placeholder = "Hypnotize me"
        entryField.returnKeyType = .Done // Swift infers UIReturnKeyType.Done
        entryField.delegate = self
        hypnosisView.addSubview(entryField)

        // Install it as *the* view of this view controller.
        self.view = hypnosisView
    }

    override func viewDidLoad() {
        // Always call the super implementation of viewDidLoad.
        super.viewDidLoad()

        println("BNRHypnosisViewController loaded its view")
    }

    func textFieldShouldReturn(textField: UITextField) -> Bool {
        // Scatter the typed message, then clear the field and dismiss the keyboard.
        self.drawHypnoticMessage(textField.text)
        textField.text = ""
        textField.resignFirstResponder()
        return true
    }

    func drawHypnoticMessage(message: NSString) {
        // Sprinkle 20 copies of the message at random positions in the view.
        for _ in 0..<20 { // the loop counter itself is never needed
            let label = UILabel()

            // Configure the label's colors and text.
            label.backgroundColor = UIColor.clearColor()
            label.textColor = UIColor.whiteColor()
            label.text = message
            // Shrink-wrap the label around the text it is displaying.
            label.sizeToFit()

            // Choose a random origin that keeps the label inside the view.
            let maxX = UInt32(self.view.bounds.size.width - label.bounds.size.width)
            let maxY = UInt32(self.view.bounds.size.height - label.bounds.size.height)
            let randomX = arc4random_uniform(maxX)
            let randomY = arc4random_uniform(maxY)
            // Convert through UInt before CGFloat (required since beta 4).
            label.frame.origin = CGPoint(x: CGFloat(UInt(randomX)), y: CGFloat(UInt(randomY)))

            // Add the label to the hierarchy.
            self.view.addSubview(label)

            // Parallax: drift the label with device tilt on both axes.
            let horizontalEffect = UIInterpolatingMotionEffect(keyPath: "center.x",
                type: .TiltAlongHorizontalAxis) // Swift infers UIInterpolatingMotionEffectType.TiltAlongHorizontalAxis
            horizontalEffect.minimumRelativeValue = -25
            horizontalEffect.maximumRelativeValue = 25
            label.addMotionEffect(horizontalEffect)

            let verticalEffect = UIInterpolatingMotionEffect(keyPath: "center.y",
                type: .TiltAlongVerticalAxis)
            verticalEffect.minimumRelativeValue = -25
            verticalEffect.maximumRelativeValue = 25
            label.addMotionEffect(verticalEffect)
        }
    }
}
STACK_EDU
Software Composition Analysis (SCA):
Why not forget everything above and just do IAST?
Nevertheless, it's the future and you should keep an eye out on this emerging testing technology. So much about technology, let's not forget yet another very powerful tool, bug bounties. Integrating a bounty program into your development process will allow you to capture actual, exploitable findings and fix them in a timely manner. As always, start small, even consider a company internal program first.
OPCFW_CODE
In my experience, the few times I've had the most creative ideas, I had them in the bathroom
This site used to have some really funky color combinations that were inspiring all on their own, but unfortunately it's a little bland now.
I don't do it much these days but now that I read about Eno's oblique strategies maybe I'll give this some more attention :-) They're a bit more concrete than Oblique Strategies which makes them a bit more useful as a tool rather than just a prompt or something to think on. Even more interesting is that Robert Fripp (a frequent Eno collaborator) composed the startup sound for Windows Vista: https://www.youtube.com/watch?v=B-LYZt2jisE For a long time I’ve thought of creating my own version of the deck. I’ve heard it can be used punctually to get out of a dead end, or from the very start through the whole creative process. (Years before they made this, I was briefly acquainted with one of the creators not named John August.) Where Holzer's statements can be used to explain, Eno's are to resolve. It was mainly an experiment to see how changing character spacing could create different forms, and then loosely relating those forms back to the prompts. A tool for Creativity born in the Polytechnic University of Milan. A synthesis of Design, Tarot and Gestalt Psychology. There's a twitter bot that randomly posts the cards , which is cool too
OPCFW_CODE
In addition to the AWS Management Console and SDKs, AWS S3 also offers two Command Line Interface (CLI) tools to interact with S3 using commands in the terminal.
- To view the new access key pair, choose Show. You will not have access to the secret access key again after this dialogue box closes. ( Refer the image given above) Install AWS CLI Based on your base machine the AWS CLI installation and command might vary. AWS has given clear instructions on how to install AWS CLI on each platform. Choose any of the following links and get your AWS CLI installed and ready - Installing AWS CLI version 2 on Linux or Unix - Installing AWS CLI version 2 on macOS - Installing AWS CLI version 2 on Windows Configure AWS CLI I presume that you have installed the AWS CLI package and that everything went well. You should be able to see the version of the AWS CLI installed when entering the following command in your terminal or command prompt I am using the AWS CLI Version1 as CLI Version 2 is still on Beta. Now it is time to configure the AWS CLI, Just enter the following command and you would be prompted a few questions about the Access Key and Passwords. it would look like this as you are setting it up. You enter your own AWS Access Key ID and Secret Access Key and the one is given below is not correct. Just a made-up. ➜ ~ aws configure AWS Access Key ID [None]: AKIAS790KQGK63WUK6T5 AWS Secret Access Key [None]: kkQEiBjJSKrDkWBLO9G/JJKQWIOKL/CpHjMGyoiJWW Default region name [None]: us-east-1 Default output format [None]: Well done. You are ready with AWS CLI Quick Syntax of AWS CLI Commands Before we are going in further, let me quickly give you the syntax of AWS CLI commands aws [options] <command> <subcommand> [<subcommand> ...] [parameters] aws CLI should be invoked with a command and a subcommand. AWS CLI provides us with a huge list of commands and their associated subcommands and their documentation is also awesome. Before we move on to our objective s3API command let us see the difference between S3 and S3API AWS CLI S3 vs S3API - What's the difference? 
- presign - Create a presigned URL for an object
Now, let's explore some common S3API CLI commands with examples: Creating an S3 Bucket using S3API with create-bucket To create an S3 bucket using the S3API CLI command, you can use the create-bucket operation. Specify the bucket name using the --bucket parameter and the region using the --region parameter. Here's an example: aws s3api create-bucket --bucket my-bucket-name --region us-east-2 Uploading an Object to an S3 Bucket using S3API put-object To upload a file or object to an S3 bucket, you can use the put-object operation. Provide the bucket name using the --bucket parameter and the file path using the --body parameter. Optionally, you can specify the object key using the --key parameter. Here's an example: aws s3api put-object --bucket my-bucket-name --key path/to/my-object.txt --body /path/to/local-file.txt Listing Objects in an S3 Bucket S3API list-objects To list all objects in an S3 bucket, you can use the list-objects operation. Specify the bucket name using the --bucket parameter. This command will return a list of objects along with their metadata. Here's an example: aws s3api list-objects --bucket my-bucket-name Downloading an Object from an S3 Bucket with S3API get-object To download an object from an S3 bucket to your local machine, you can use the get-object operation. Provide the bucket name using the --bucket parameter and the object key using the --key parameter. Specify the local file path where the object will be saved using the --output parameter. 
Here's an example: aws s3api get-object --bucket my-bucket-name --key path/to/my-object.txt --output /path/to/local-file.txt Get Object/file metadata with head-object To fetch the document metadata you can use the head-object command of s3api, this is to simply fetch the object's metadata If the given object does not exist it would return 404 which is an indication that the requested object is not present in the bucket Mostly we use this to validate if a certain object/file is present or not before we take any subsequent action like deleting or overwriting etc Here is the sample command you can use to try. aws s3api head-object --bucket my-bucket-name --key index.html The preceding command fetches the metadata of the file index.html if the object is present otherwise it would return Object not found with 404 Deleting an Object from an S3 Bucket with delete-object To delete an object from an S3 bucket, you can use the delete-object operation. Specify the bucket name using the --bucket parameter and the object key using the --key parameter. Here's an example: aws s3api delete-object --bucket my-bucket-name --key path/to/my-object.txt These examples provide a glimpse of what you can achieve with the S3API CLI command. However, AWS S3API offers a wide range of operations, including managing bucket policies, setting access control lists (ACLs), and configuring bucket notifications, among others. We have listed all the available subcommands of S3API earlier, Refer to the AWS CLI documentation for a complete list of S3API operations and their respective options. The AWS S3API CLI command is a powerful tool that enables you to interact with your S3 buckets and objects using a command-line interface. In this article, we covered some common S3API operations, such as - creating buckets, - uploading and downloading objects, - listing objects - deleting objects. Hope it helps Signup for Exclusive "Subscriber-only" Content
OPCFW_CODE
We all know that a megabyte in binary system is not the same as one million bytes (in decimal system). But have you actually cared much about it? I have to admit I haven't. I know there is a small rounding error, but by and large I always treated 2^10 = 1 kB = 1024 bytes and 10^3 = 1 kB = 1000 as the same thing. (Update: Opening sentence was edited to remove units MB and MiB since it seems even I managed to use them backwards! The math in this article is correct. The rest of the article uses MB, GB and TB mostly to refer to binary magnitudes, which is apparently incorrect. See comments for wikipedia links and discussion.) More importantly, when you move into larger numbers, rounding errors usually become even less important. Unfortunately, in this case they become bigger: |magnitude||binary||decimal||% error||absolute error||in kB||% error| |1 kB||1024||1000||2.4%||24 bytes||1||0.0%| |1 MB||1048576||1000000||4.9%||48.6 kB||1024||2.4%| |1 GB||1073741824||1000000000||7.4%||73.74 MB||1048576||4.9%| |100 GB||107374182400||100000000000||7.4%||7.4 GB||104857600||4.9%| |1 TB||1099511627776||1000000000000||10.0%||100 GB||1073741824||7.4%| The above table makes it clear why these rounding errors are becoming a problem even if we could ignore them previously. When we had megabytes or even a few gigabytes of RAM, it didn't matter. In the 1 GB region the error is 74 MB, but I don't want to use up the last 100 MB of RAM anyway, so the rounding error is smaller than anything I care about anyway. But now I'm working with a server that has more than 100GB of memory. Suddenly the difference in getting your numbers right makes 7 and a half GB worth of difference! 7+ gigs of RAM costs a lot, so of course I will not be leaving that much unused but will try to use it up until the last gigabyte. But to use that RAM, I need to know how much precisely there is to use! And if that didn't convince you, in a year we'll see the first terabyte level systems come out. 
At that point the rounding error is 100GB! And then there is top. I use top because that's what everyone else does too. It has a lot of numbers that are fun to watch. I watch them like everyone else does too. But I don't really understand what they mean. The reason is that top wants to show me all values in kB (kilobytes). I believe WTF! is an appropriate expression... I could learn to live with the discrepancies between binary and decimal magnitudes. But this idiot utility chooses to show me values that are neither. I can learn a few numbers by heart: 107 billion is the same as 1 GB, or 132 billion is actually 128 GB, or 109 trillion is 1 TB. But 104 something...? What is that???? I'll tell you what it is. It is 104*1000*1000*1024 which is about 100 GB. I think top is doing us a great disservice with this approach. It uses numbers that are in their own alternative universe, so it's an additional burden to try to remember what they really mean. It mixes binary (1024) and decimal (1000) arithmetics into the same value which is nonsensical. And worst of all, it makes the rounding error smaller, so it is more tempting to continue to ignore it. But an error of 5 GB is not much better than an error of 7.4 GB, so you can't really ignore it after all! But wait! you say. GNU utilities always support the -h option to output "human readable" values for cases like this. Guess what: top doesn't :-( But if you really read the man page to the end, and make an enlightened guess of what some cryptic sentences means, it turns out that top -M is what you want! (I believe the "M" is for "Megabytes", but it also shows gigabytes just fine.) 
These are the total memory allocated (virtual memory) and "resident memory", which is the physical amount of RAM consumed.
Or you could mentally add +1 to all the numbers to be on the safe side, which is rather silly.
OPCFW_CODE
Second, Loredo criticizes my chapter at one location confusing it with another paper by another author, because my chapter does not touch the subject mentioned by loredo12 critics, the comparison between Bayesian and frequentist fitting models.
and thus my chapter unambiguously deals with complex models, and must be put among those dealing with complex models. To be pedantic, in Sect. 4.3 of my chapter I add complexity to the March et al. model
Second, loredo12 criticizes my chapter at one location confusing it with another paper by another author, because my chapter does not touch the subject criticized by Loredo, the comparison of frequentist and Bayesian fitting models.
OPCFW_CODE
using MarsRovers.Net.Types;
using Microsoft.VisualStudio.TestTools.UnitTesting;
using System;
using System.Collections.Generic;
using System.Text;

namespace MarsRovers.Net.Tests.UnitTests
{
    /// <summary>
    /// Unit tests for the core value types (Direction, Position, Vector):
    /// turning, equality, conversions, formatting, arithmetic and destructuring.
    /// </summary>
    [TestClass]
    [TestCategory(Category.UnitTest)]
    public class TypeSystemTests
    {
        [TestMethod]
        public void DirectionLeftAndRight()
        {
            // Facing north, a left turn faces west and a right turn faces east.
            var facing = Direction.North;

            Assert.AreEqual(Direction.West, facing.Left);
            Assert.AreEqual(Direction.East, facing.Right);
        }

        [TestMethod]
        public void DirectionEquality()
        {
            // Directions built from a heading in degrees compare equal to the
            // corresponding named compass points.
            Assert.AreEqual(Direction.North, new Direction(0));
            Assert.AreEqual(Direction.East, new Direction(90));
            Assert.AreEqual(Direction.South, new Direction(180));
            Assert.AreEqual(Direction.West, new Direction(270));

            // The default struct value is north.
            Assert.AreEqual(Direction.North, default(Direction));
        }

        [TestMethod]
        public void DirectionConversions()
        {
            // A direction converts to its heading in degrees.
            var heading = Direction.East;

            Assert.AreEqual<int>(90, heading);
        }

        [TestMethod]
        public void DirectionToString()
        {
            // Each compass point renders as its single-letter abbreviation.
            Assert.AreEqual("N", Direction.North.ToString());
            Assert.AreEqual("E", Direction.East.ToString());
            Assert.AreEqual("S", Direction.South.ToString());
            Assert.AreEqual("W", Direction.West.ToString());
        }

        [TestMethod]
        public void DirectionToVector()
        {
            // Multiplying a direction by a magnitude yields the equivalent vector.
            var oneStepNorth = Direction.North * 1;

            Assert.AreEqual(new Vector(Direction.North, 1), oneStepNorth);
        }

        [TestMethod]
        public void PositionEquality()
        {
            // Positions compare by value; the default position is the origin.
            Assert.AreEqual(new Position(1, 2), new Position(1, 2));
            Assert.AreEqual(new Position(0, 0), default(Position));
        }

        [TestMethod]
        public void PositionPlusVector()
        {
            // Adding a one-unit northward vector moves the origin up one cell.
            var step = Direction.North * 1;

            var moved = new Position(0, 0) + step;

            Assert.AreEqual(new Position(0, 1), moved);
        }

        [TestMethod]
        public void PositionDestructuring()
        {
            // A position deconstructs into its x and y coordinates.
            var (x, y) = new Position(1, 2);

            Assert.AreEqual(1, x);
            Assert.AreEqual(2, y);
        }
    }
}
STACK_EDU
Crash on startup If you are creating an issue for a BUG please fill out this information. If you are asking a question or requesting a feature you can delete the sections below. Failure to fill out this information will result in this issue being closed. If you post a full stack trace in a bug it will be closed, please post it to http://gist.github.com and then post the link here. Bug Information Version Number of Plugin: 1.0.7 and 2.0.1 Alpha Device Tested On: HTC 10 Simulator Tested On: Version of VS: 2017 15.3.5 Version of Xamarin: <IP_ADDRESS> Versions of other things you are using: <IP_ADDRESS> Steps to reproduce the Behavior I start the application Expected Behavior The app starts and I receive a new token. Actual Behavior The app crashes during launch with the following exception: 10-05 21:13:57.590 E/mono-rt (14563): [ERROR] FATAL UNHANDLED EXCEPTION: Java.Lang.IllegalStateException: Default FirebaseApp is not initialized in this process com.postkca.friendsappdate. Make sure to call FirebaseApp.initializeApp(Context) first. 
10-05 21:13:57.590 E/mono-rt (14563): at System.Runtime.ExceptionServices.ExceptionDispatchInfo.Throw () [0x0000c] in <896ad1d315ca4ba7b117efb8dacaedcf>:0 10-05 21:13:57.590 E/mono-rt (14563): at Java.Interop.JniEnvironment+StaticMethods.CallStaticObjectMethod (Java.Interop.JniObjectReference type, Java.Interop.JniMethodInfo method) [0x00068] in <e499a5498b1b48379b88fe5ed629079f>:0 10-05 21:13:57.590 E/mono-rt (14563): at Android.Runtime.JNIEnv.CallStaticObjectMethod (System.IntPtr jclass, System.IntPtr jmethod) [0x0000e] in <2e14bb2dd93a405e81838369ed72695b>:0 10-05 21:13:57.590 E/mono-rt (14563): at Firebase.Iid.FirebaseInstanceId.get_Instance () [0x00032] in <09a4f89addb34f02af055f39e6c540b7>:0 10-05 21:13:57.590 E/mono-rt (14563): at Plugin.FirebasePushNotification.FirebasePushNotificationManager+<>c__DisplayClass32_0.<Initialize>b__0 (System.Object state) [0x00177] in C:\Plugins\FirebasePushNotification\src\Plugin.FirebasePushNotification.Android\FirebasePushNotificationManager.cs:134 10-05 21:13:57.590 E/mono-rt (14563): at System.Threading.QueueUserWorkItemCallback.WaitCallback_Context (System.Object state) [0x00007] in <896ad1d315ca4ba7b117efb8dacaedcf>:0 10-05 21:13:57.590 E/mono-rt (14563): at System.Threading.ExecutionContext.RunInternal (System.Threading.ExecutionContext executionContext, System.Threading.ContextCallback callback, System.Object state, System.Boolean preserveSyncCtx) [0x00071] in <896ad1d315ca4ba7b117efb8dacaedcf>:0 10-05 21:13:57.590 E/mono-rt (14563): at System.Threading.ExecutionContext.Run (System.Threading.ExecutionContext executionContext, System.Threading.ContextCallback callback, System.Object state, System.Boolean preserveSyncCtx) [0x00000] in <896ad1d315ca4ba7b117efb8dacaedcf>:0 10-05 21:13:57.590 E/mono-rt (14563): at System.Threading.QueueUserWorkItemCallback.System.Threading.IThreadPoolWorkItem.ExecuteWorkItem () [0x00021] in <896ad1d315ca4ba7b117efb8dacaedcf>:0 10-05 21:13:57.590 E/mono-rt (14563): at 
System.Threading.ThreadPoolWorkQueue.Dispatch () [0x00074] in <896ad1d315ca4ba7b117efb8dacaedcf>:0 10-05 21:13:57.590 E/mono-rt (14563): at System.Threading._ThreadPoolWaitCallback.PerformWaitCallback () [0x00000] in <896ad1d315ca4ba7b117efb8dacaedcf>:0 10-05 21:13:57.590 E/mono-rt (14563): --- End of managed Java.Lang.IllegalStateException stack trace --- 10-05 21:13:57.590 E/mono-rt (14563): java.lang.IllegalStateException: Default FirebaseApp is not initialized in this process com.postkca.friendsappdate. Make sure to call FirebaseApp.initializeApp(Context) first. 10-05 21:13:57.590 E/mono-rt (14563): at com.google.firebase.FirebaseApp.getInstance(Unknown Source) 10-05 21:13:57.590 E/mono-rt (14563): at com.google.firebase.iid.FirebaseInstanceId.getInstance(Unknown Source) 10-05 21:13:57.590 E/mono-rt (14563): Code snippet My Initialization is: FirebasePushNotificationManager.Initialize(this, true); FirebasePushNotificationManager.ProcessIntent(Intent); I put both in the SplashActivity. I also tried to put the first statement into my BaseApplication or my MainActivity without success. Screenshots Make sure to call FirebaseApp.initializeApp(Context) first and the google-services.json has the GoogleServicesJson build action. If you have that set, then clean and build again, this is a known issue when using Firebase Component. More info and fix here: https://bugzilla.xamarin.com/show_bug.cgi?id=56108. A interesting clean and rebuild did it. Although I had google-services.json added a long time ago. But it seems the issue was, that I already had the two services declared in my project. When I removed them without clean and rebuild the issue appeared. Thanks for your very fast support! :)
GITHUB_ARCHIVE
File carving for compressed NTFS volumes. LaZy_NT is a forensic analysis and data recovery framework designed to carve files from raw disk images. It uses file signatures and other techniques to recover as much of the original data as possible. The feature that sets this software apart from more well-known file carving utilties is that it was designed to detect and carve files that have been compressed by the NTFS file system. NTFS supports compression of individual files, folders or entire volumes using the proprietary ‘LZNT1’ algorithm, from which this package derives its name. While processing a disk image, if NTFS compression is detected, LaZy_NT will decompress the data stream on the fly to ensure that the correct file data is recovered. In addition to standard file carving, LaZy_NT provides a rudimentary bulk ASCII extraction capability to support forensic investigation. When enabled, this mode will decompress and extract all ASCII text data and evaluate it to identify email addresses, URLs, and other personal or forensically interesting information. LaZy_NT operates normally on files and volumes which have not been compressed, and on images of non-NTFS file systems. However under those circumstances the recovery performance may not be as good as a combination of more well known file carving and bulk extraction utilities. The simplest way to invoke the pre-made application is to call the run() method of the App class within the app module. The following example demonstrates how this canbe implemented as a simple launcher script: #!python from LaZy_NT import app ... # Obtain command line arguments, e.g. via argparse, to pass to App() as # keyword arguments. Otherwise defaults from `config` will be used. ... application = app.App() application.run() Alternatively, the API exposed by LaZy_NT can be used to build a more customized file recovery application, without using the app module at all. 
LaZy_NT is available on PyPI and installable via pip: python -m pip install LaZy_NT The following optional dependencies enhance LaZy_NT by adding metadata extraction for files after they’ve been carved: hachoir-core, hachoir-parser, hachoir-metadata openxmllib pyPdf Pillow (or PIL) All optional dependencies will be installed automatically if LaZy_NT is installed through pip. Documentation for LaZy_NT was generated using pdoc. I would like to recognize Richard Russon and Yuval Fledel, authors of the ‘NTFS Documentation’ manual associated with the Linux NTFS filesystem driver. Without their detailed explanation of the LZNT1 algorithm, this project would not have been possible. I would also like to recognize Simson L. Garfinkel, designer of the well known ‘Bulk Extractor’ utility. While I have not viewed or used any of his source code or documentation in this project, the use of his utility was what inspired me to add ASCII extraction capabilities to this project. Release history Release notifications | RSS feed Download the file for your platform. If you're not sure which to choose, learn more about installing packages.
OPCFW_CODE
using System;
using System.Text;
using System.Web;
using CAESDO.Recruitment.Core.Domain;

namespace CAESDO.Recruitment.Core.Abstractions
{
    public interface ITemplateProcessing
    {
        string ProcessTemplate(Reference reference, Application application, string template, bool includeUploadPortion);
    }

    /// <summary>
    /// Expands "{FieldName}" placeholder tokens in an email template into values
    /// taken from a reference/application pair, and wraps the result in a
    /// minimal HTML document.
    /// </summary>
    public class TemplateProcessing : ITemplateProcessing
    {
        private const string STR_PositionDetailsURL = "/PositionDetails.aspx";
        private const string STR_UploadReferenceURL = "/UploadReference.aspx";

        // State for the template currently being processed; set by ProcessTemplate.
        private Application _application;
        private bool _includeUploadPortion = false;
        private Reference _reference;

        public TemplateProcessing()
        {
        }

        #region ITemplateProcessing Members

        /// <summary>
        /// Processes the template, optionally appending the reference-letter
        /// upload link to the end of the body.
        /// </summary>
        /// <param name="reference">The reference whose fields populate Reference* tokens.</param>
        /// <param name="application">The application whose fields populate the remaining tokens.</param>
        /// <param name="template">The template text containing "{FieldName}" tokens.</param>
        /// <param name="includeUploadPortion">When true, the upload link is appended after the template body.</param>
        /// <returns>The populated template wrapped in html/body tags.</returns>
        public string ProcessTemplate(Reference reference, Application application, string template, bool includeUploadPortion)
        {
            _includeUploadPortion = includeUploadPortion;
            return ProcessTemplate(reference, application, template);
        }

        #endregion

        #region Private Methods

        /// <summary>
        /// Iterates through the given template and replaces every "{FieldName}"
        /// token with its value, wrapping the output in html/body tags.
        /// </summary>
        /// <param name="body">The template text.</param>
        /// <returns>Body text with fields populated with correct values.</returns>
        private string HandleBody(string body)
        {
            var sb = new StringBuilder();
            sb.Append("<html><body>");

            int begindex = body.IndexOf("{"); // Find the beginning of a replacement token
            while (begindex >= 0)
            {
                sb.Append(body.Substring(0, begindex)); // Copy the text that comes before the token
                body = body.Substring(begindex);        // Drop everything before the '{'

                int endindex = body.IndexOf("}");       // Find the end of the token
                if (endindex < 0)
                {
                    // BUG FIX: an unmatched '{' used to leave `body` unchanged,
                    // so the loop never terminated. Emit the remainder verbatim.
                    break;
                }

                string parameter = body.Substring(0, endindex + 1); // Token including the braces
                body = body.Substring(endindex + 1);                // Remove the token from the input
                sb.Append(replaceParameter(parameter));

                begindex = body.IndexOf("{"); // Find the next token
            }

            sb.Append(body); // Trailing text after the last token

            if (_includeUploadPortion)
                sb.Append(getUploadIDPortion());

            sb.Append("</body></html>");
            return sb.ToString();
        }

        /// <summary>
        /// Converts a "{FieldName}" token into the correct value.
        /// </summary>
        /// <param name="parameter">Token from the template, including the surrounding braces.</param>
        /// <returns>Value that should be put into the space where the field was.</returns>
        private string replaceParameter(string parameter)
        {
            // Trim the surrounding {}; a degenerate token falls through to the default result.
            parameter = parameter.Length >= 2 ? parameter.Substring(1, parameter.Length - 2) : string.Empty;

            switch (parameter)
            {
                case "ReferenceName":
                    return _reference.FullName;
                case "ReferenceLastName":
                    return _reference.LastName;
                case "ReferenceTitle":
                    return _reference.Title;
                case "ApplicantName":
                    if (string.IsNullOrEmpty(_application.AssociatedProfile.FullNameFirstLast.Trim()))
                        return "Name Not Given";
                    else
                        return _application.AssociatedProfile.FullNameFirstLast;
                case "RecruitmentAdminName":
                    return _application.AppliedPosition.HRRep;
                case "RecruitmentAdminEmail":
                    return string.Format("<a href='mailto:{0}'>{0}</a>", _application.AppliedPosition.HREmail);
                case "PositionContact":
                    return _application.AppliedPosition.HRRep;
                case "PositionContactEmail":
                    return string.Format("<a href='mailto:{0}'>{0}</a>", _application.AppliedPosition.HREmail);
                case "PositionContactPhone":
                    return _application.AppliedPosition.HRPhone;
                case "Deadline": // Deadline and ReviewDate are the same token
                case "ReviewDate":
                    return _application.AppliedPosition.Deadline.ToLongDateString();
                case "PositionTitle": // Rendered as a link, same as PositionLink
                    return getPositionLink();
                case "PositionLink":
                    return getPositionLink();
                case "PrimaryDepartment":
                    if (_application.AppliedPosition.PrimaryDepartment.Unit != null)
                        return _application.AppliedPosition.PrimaryDepartment.Unit.FullName;
                    break; // No unit: fall through to the unknown-token result below
                case "UploadLink":
                    _includeUploadPortion = false; // Don't append the default upload portion; it is inserted here instead
                    return getUploadIDPortion();
                case "Date":
                    // Render today's date as e.g. "May 16, 2007" instead of "5/16/2007".
                    DateTime dateTime = DateTime.Now;
                    return string.Format("{0} {1}, {2}", GetMonth(dateTime.Month), dateTime.Day, dateTime.Year);
                default:
                    break;
            }

            // Unknown (or unpopulated) token: make it visible in debug builds,
            // drop it silently in release builds.
#if DEBUG
            return "Error";
#else
            return string.Empty;
#endif
        }

        /// <summary>
        /// Builds the absolute link the reference follows to upload a letter,
        /// keyed by the reference's UploadID.
        /// </summary>
        private string getUploadIDPortion()
        {
            return "<a href='" +
                HttpContext.Current.Request.Url.GetComponents(UriComponents.SchemeAndServer, UriFormat.SafeUnescaped) +
                HttpContext.Current.Request.ApplicationPath + STR_UploadReferenceURL +
                "?ID=" + _reference.UploadID +
                "'>Click here to upload reference letter</a>";
        }

        /// <summary>
        /// Builds an absolute link to the position-details page, using the
        /// position title as the link text.
        /// </summary>
        private string getPositionLink()
        {
            return string.Format("<a href='{0}?PositionID={1}'>{2}</a>",
                HttpContext.Current.Request.Url.GetComponents(UriComponents.SchemeAndServer, UriFormat.SafeUnescaped) +
                HttpContext.Current.Request.ApplicationPath + STR_PositionDetailsURL,
                _application.AppliedPosition.ID,
                _application.AppliedPosition.PositionTitle);
        }

        /// <summary>
        /// Returns the English name for a 1-based month number, or an empty
        /// string for values outside 1-12 (matching the original behavior).
        /// </summary>
        private string GetMonth(int month)
        {
            string[] names =
            {
                "January", "February", "March", "April", "May", "June",
                "July", "August", "September", "October", "November", "December"
            };
            return (month >= 1 && month <= 12) ? names[month - 1] : "";
        }

        #endregion

        /// <summary>
        /// Processes the template using the upload-portion flag from a previous
        /// 4-argument call (false on a fresh instance).
        /// NOTE(review): _includeUploadPortion is intentionally not reset here,
        /// so a prior call's flag carries over to this overload — confirm callers
        /// depend on that before changing it.
        /// </summary>
        public string ProcessTemplate(Reference reference, Application application, string template)
        {
            _reference = reference;
            _application = application;
            return HandleBody(template);
        }
    }
}
STACK_EDU
failed to start child: fork/exec I have a script in a particular directory that outputs metrics to stdout. I am getting a failed to start child error. $ ./script-exporter -script.path /etc/scripts -web.listen-address :9661 2019/04/17 02:34:44 error running script 'db2_metrics': failed to start child: fork/exec /etc/scripts/db2_metrics: exec format error 2019/04/17 02:34:54 error running script 'db2_metrics': failed to start child: fork/exec /etc/scripts/db2_metrics: exec format error 2019/04/17 02:35:04 error running script 'db2_metrics': failed to start child: fork/exec /etc/scripts/db2_metrics: exec format error $ ls -la /etc/scripts/ total 20 drwx------ 2 root root 4096 Apr 17 02:49 . drwxr-xr-x. 102 root root 12288 Apr 16 06:48 .. -rwx------ 1 db2inst1 db2iadm1 499 Apr 17 02:45 db2_metrics $ /etc/scripts/db2_metrics db2_connect_status{dbname=DBNAME,groupname=db2} 1 db2_hadr_status{dbname=DBNAME,groupname=db2} 0 Thanks I can't think of how script-exporter would get that error but it would run by other means, though I suspect it's something to do with your shell. What does file /etc/scripts/db2_metrics report? What are the contents of db2_metrics? Can you paste it (eliding passwords and other sensitive information)? $ cat /etc/scripts/db2_metrics #!/bin/bash su - db2inst1 -c "db2 connect to DBNAME 1> /dev/null" if [ $? -ne 0 ] then echo "db2_connect_status{dbname="DBNAME",groupname="db2"} 0" else echo "db2_connect_status{dbname="DBNAME",groupname="db2"} 1" fi HADRinactive="HADR is not active." cmdOutput=$(su - db2inst1 -c "db2pd -db DBNAME -hadr | grep "HADR is"") if [ "$HADRinactive" == "$cmdOutput" ] then echo "db2_hadr_status{dbname="DBNAME",groupname="db2"} 0" else echo "db2_hadr_status{dbname="DBNAME",groupname="db2"} 1" fi What do these commands output? 
file /etc/scripts/db2_metrics file /bin/bash uname -a $ file /etc/scripts/db2_metrics /etc/scripts/db2_metrics: Bourne-Again shell script text executable $ file /bin/bash /bin/bash: ELF 64-bit LSB executable, x86-64, version 1 (SYSV), dynamically linked (uses shared libs), for GNU/Linux 2.6.18, stripped $ uname -a Linux 2.6.32-696.23.1.el6.x86_64 #1 SMP Sat Feb 10 11:10:31 EST 2018 x86_64 x86_64 x86_64 GNU/Linux How mysterious. strace will almost certainly provide the answer, but it produces a lot of output. Let's start with a very constrained filter, and if need be we'll broaden it. Please post the output of: strace -f -e execve -s 500 bash /etc/scripts/db2_metrics and strace -f -e execve -s 500 ./script-exporter -script.path /etc/scripts -web.listen-address :9661 where in the latter case you arrange to have script-exporter scraped so that it invokes db2_metrics. You will likely have to apt/yum install strace first. Thanks, I have attached 2 txt files with the outputs strace -f -e execve -s 500 bash /etc/scripts/db2_metrics > strace_1.txt strace -f -e execve -s 500 ./script-exporter -script.path /etc/scripts -web.listen-address :9661 > strace_2.txt strace_2.txt strace_1.txt That strace output suggests that the script is executing just fine. I notice in your earlier ls output that the timestamp of the file is later than the timestamps in the error output. Is it possible you fixed something subsequent to those errors? If you plot rate(script_errors_total[1m]), is it nonzero for script_name="db2_metrics"? Hi, thank you for your help. This is now resolved. The issue as expected was in the script. If we look at comment 1 : https://github.com/ncabatoff/script-exporter/issues/10#issue-434143363 the output of the script shows this output. db2_connect_status{dbname=DBNAME,groupname=db2} 1 db2_hadr_status{dbname=DBNAME,groupname=db2} 0 The above DBNAME is not enclosed in "". 
I fixed the script so the output looks like : db2_connect_status{dbname="DBNAME",groupname="db2"} 1 db2_hadr_status{dbname="DBNAME",groupname="db2"} 0 Great, glad you got it working! I'm pretty sure the error "failed to start child: fork/exec /etc/scripts/db2_metrics: exec format error" was not related to this. You can verify this after the fact. The metric script_parse_errors_total should've been nonzero while you had the missing quotes. The metric script_errors_total should have been nonzero when the script wasn't executable at all. I am also getting the above exception intermittently when I run the docker image from my home network. What might be the issue ? Capturing the errors error running script 'run': failed to start child: fork/exec /opt/script-exporter/scripts/run: exec format error
GITHUB_ARCHIVE
In the October 2017 survey we obtained responses from 1,815,237,491 websites and 6,886,362 web-facing computer systems, reflecting a acquire of 10.2 million websites and 88,300 computer systems. When multiple sites are already configured on the Net server, an intermediate page is displayed to specify to which Website the installation applies: enter the number linked to the location and click OK. A efficiency improvement could be achieved by shifting the rewrite rules from thehtaccess file into the VirtualHost block of your Apache configuration after which altering AllowOverride All to AllowOverride None in your VirtualHost block. Likewise, you could use a Web server if you wish to use the tracking functionality, which is just out there as a Net server extension module. Consequently, it’s unlikely to have much of a presence on the internet within the close to future; additionally, for performance reasons, it’s seemingly that NGINX Unit can be put in behind a regular nginx internet server performing as a reverse proxy. Apache continues to guide the market when it comes to energetic sites and web-dealing with computer systems, where it has market shares of forty four.5” and forty two.3”. It also has the largest presence among the many top million sites, with 386,000 of those utilizing Apache. As soon as the setup is complete you’ll be able to entry the online web page by pointing your browser to the router IP tackle or DNS entry. You may access the default Apache touchdown web page to substantiate that the software program is running properly. Go is an unbelievable effective language to create internet servers and web companies thanks to its highly effective commonplace library that may help us to do no matter we want with the net. You’ll be able to cobble collectively a server out of spare elements and it will nearly certainly be enough to do the job. 
You can purchase a Linux or BSD server set up ISO without spending a dime, whereas it’s worthwhile to spend some amount of money to (legally) come up with Windows Server, either by means of TechNet or through shopping for it outright. Now that you have your internet server put in, you will have many options for the kind of content material to serve and the technologies you need to use to create a richer expertise. The least-complex, free resolution is to download and set up VirtualBox That will run on an present Windows or OS X or Linux host and will allow you to run a virtualized Linux server with a minimal of fuss. Double the maximum capacity of your present Apache servers with LiteSpeed Internet Server’s streamlined occasion-driven architecture, capable of handling 1000’s of concurrent purchasers with minimal reminiscence consumption and CPU usage. A typical web server is completely different than your laptop, though, because it possible does not have a monitor or a keyboard. The Apache HTTP Server Venture will only present maintenance releases of the 2.2.x taste via June of 2017, and can provide some security patches beyond this date via at the very least December of 2017. As you understand, compatibility situation is a big problem of Android phone, I am unable to check it in all phones. One other net server that is typically obtainable is Web Info Service (IIS).
OPCFW_CODE
Modeling a NoSQL database (DynamoDB) for different sorting queries I have somewhat of a specific question regarding how to model my DynamoDB tables so that I can handle the necessary queries. My application is centered on the idea of "events." Every event has attributes (name, location, time, number of attendees, etc). Events are associated with the cities in which they are located. I am trying to figure out how to perform a get / query request (probably a series of get / query requests) to obtain the top 25 events with the most attendees for a specific city. I come from a background of relational databases, and this would be a really simple query (select * from events where city = x order by attendees limit 25). But I am having a hard time figuring out how to do the same with a non-relational database. I know I will have to create additional tables to store mappings of hashes, but I can't seem to figure it out. One way I have thought of implementing it is to somehow let the "attendees" (of Number type) be the range key, and let the city be the hash key. But this will not necessarily be a unique key because multiple events in the same city could have the same number of attendees. Also, is it even possible to "update / atomically increment" a range key? Thanks for all your help! Note: I still believe that RDBMS is more suited for these queries but here goes: First, You can only atomically increment an attribute. Now for your scenario I would suggest the following: Table: Events hk: eventId attributes Table: Top_Attendees_Per_City hk: city rk: eventId Table: Event_Id_Generator hk: event_counter running_counter Table: Minimum_Attendees_Per_City hk: city min_attendees_number, max_attendees_number, events_number Once an event has fired to your backend, you will need to assign it a running id. 
this is not mandatory and it has issues with scaling but it will ensure that if events have the same number of attendees, newer events will take precedence in your "top25". You will need to check if the number of attendees is between the min and max while counting the events until "25". This enables your minimum_attendees_per_city to decide whether this new event is going to appear in the top25. If it is, then you add it to the top_attendees_per_city. Finally, you Query that table with setScanIndexForward(false) and setLimit(25) resulting in the 25 events with the highest number of attendees. Final note: The resulting items are not ordered by their attendees, you can order them at the application level before returning.
STACK_EXCHANGE
Though Python programming language has equivalent characteristics with PERL, it's a lot more object-oriented capabilities and is a lot more impressive. One of several primary points of interest of using the R () natural environment is the ease with which customers can compose their very own applications and custom made capabilities. The R programming syntax is incredibly straightforward to learn, even for customers without former programming working experience. The most commonly applied graphical built-in enhancement ecosystem for R is RStudio.[fifty] An identical development interface is R Resources for Visible Studio. Honest premiums and awesome special discounts – Effectively composed paperwork at a good selling price is what most learners are trying to find. No other web-site give the very best costs than us. We arrived up Along with the rates with our pupils in mind. We know how students work flat out to get income. It's got a pure substitute for programs which include pure language processing. Softwares such as Siri and Wolfram Alpha are beginning to permeate the tip consumer Market and several these types of applications are still to return will probably be created in Python. Anyway, there are various researchers who prefers Matlab. Our group performs with both of those languages and we offer greatest Python assignment help in comparison to the other higher education homework help Sites. The key R implementation is penned in R, C, and Fortran, and there are various other implementations aimed toward improving upon speed or raising extensibility. A intently similar implementation is pqR (very quick R) by Radford M. Neal with improved memory management and aid for computerized multithreading. Renjin and FastR are Java implementations of R to be used inside of a Java Virtual Device. Quick R code calculating Mandelbrot established in the initial 20 iterations of equation z = z2 + c plotted for various complex constants c. 
This example demonstrates: “ I'd lots of problem with my computer programming assignment and with 24 hours left I had been freaking out. Thanks for rescuing me on time. You saved me from repeating a faculty calendar year ”Tom Smith, United states So please don’t Feel a lot more if you really want python homework help. Just fill the Get in touch with variety and get instantaneous help within the Python expert who click this link is me Python has a reference counting plus a cycle-detecting rubbish collector to help manage its memory. For the duration of application execution, a characteristic often called late binding or dynamic identify resolution is utilized to bind the tactic and variable names. Python programming language is made to be remarkably extensible and programmable interfaces is usually added to existing purposes. “If you don’t need to fill the form, e mail me your requirement at email@example.com and acquire the instant reply” Totally free rework in your articles – Our experts are incredibly proficient and hardly ever make errors. It's very unlikely that college students come back to us for rework. Nevertheless, in almost any scenario this comes about, We are going to give you with totally free endless revisions. Often academics and professors assign get the job done to The scholars that is complicated. These assignments are provided to evaluate the normal development of every university student in their own individual spots.
OPCFW_CODE