From a31a3a3f3dbdc31d02bbaa7b4638c14f99951721 Mon Sep 17 00:00:00 2001 From: plocket <52798256+plocket@users.noreply.github.com> Date: Thu, 31 Oct 2024 11:27:18 -0400 Subject: [PATCH] Deduplicate data we save to the files, tweak formatting --- .../decisions/007_getting_and_storing_logs.md | 21 +- .../008_keep_setup_run_takedown_separate.md | 31 +++ docs/decisions/reports_and_logs_2024_08_11.md | 197 +++++++++++------- docs/errors_and_logs.md | 10 +- docs/setup_run_and_takedown.md | 87 ++++++++ lib/run_cucumber.js | 30 +-- lib/steps.js | 4 +- lib/utils/log.js | 33 ++- lib/utils/reports.js | 63 ++++-- tests/unit_tests/log.test.js | 6 +- 10 files changed, 360 insertions(+), 122 deletions(-) create mode 100644 docs/decisions/008_keep_setup_run_takedown_separate.md create mode 100644 docs/setup_run_and_takedown.md diff --git a/docs/decisions/007_getting_and_storing_logs.md b/docs/decisions/007_getting_and_storing_logs.md index 934be861..e10bd386 100644 --- a/docs/decisions/007_getting_and_storing_logs.md +++ b/docs/decisions/007_getting_and_storing_logs.md @@ -37,7 +37,7 @@ Use fs.appendFileSync with node's `util.inspect`. In future, we might explore ca ### Listen for write to `stdout` -We'd listen for `stdout` and `stderr` events. We haven't yet experimented with detecting which logs are cucumber's and storing only those. +We'd listen for `stdout` and `stderr` events. We haven't yet experimented with detecting which logs are cucumber's and storing only those. See pseudo code in the supplementary material section. **Pros:** @@ -157,4 +157,23 @@ log.with_flush( run ); `Log` would handle creating the promises. `log.with_flush()` would handle the try/catch logic to wait for the listeners and close the streams. +### Listen for write to `stdout` + +For capturing console output, look into how to get the `stdin` for capturing various console output. https://stackoverflow.com/a/54202970: + +```js +const stdin = process.openStdin() + +process.stdout.write('Enter name: ') + +stdin.addListener('data', text => { + const name = text.toString().trim() + console.log('Your name is: ' + name) + + stdin.pause() // stop reading +}) +``` + +We couldn't figure out how to handle decoding emoji. + [^3]: I'm not sure of good names to differentiate the code in run_cucumber.js, which triggers the rest of the test running, but is separate from it, and the code that handles the nitty gritty - like index.js, steps.js, scope.js, etc. "Running cucumber" is ambiguous. I'm calling run_cucumber.js code the "cucumber runner" and I'm calling the code that handles the actual tests the "tests handler". diff --git a/docs/decisions/008_keep_setup_run_takedown_separate.md b/docs/decisions/008_keep_setup_run_takedown_separate.md new file mode 100644 index 00000000..527119d3 --- /dev/null +++ b/docs/decisions/008_keep_setup_run_takedown_separate.md @@ -0,0 +1,31 @@ +# Keep setup run takedown separate + +## Context and Problem Statement + +We have multiple environments in which ALKiln runs. Some need to always install the author's package code, others only need to install the code sometimes, some don't install the code at all. How often do different kinds of environments use the setup, install, and takedown commands? + +**Always:** GitHub actions always need to install the package onto a server. There are two kinds of ways to run ALKiln in GitHub hidden under this umbrella. Both ways copy the current branch's code and then install the package on a server (setup). 
+ +Option 1: One action installs the package on the server itself (a temporary docassemble server hosted on GitHub). + +Option 2: The other installs in an individual author's account (their Playground) on the docassemble server[^1]. + +Then our GitHub action runs the tests using that installed package (run). When the test suite is done, the GitHub action either destroys the temporary server (option 1) or just deletes the package from the author's server (takedown). + +**Sometimes:** Internal developers of ALKiln install the feature branch they're working on onto a server with `setup` (install). They run tests multiple times using that installed package with `run_cucumber` (many runs). When they're done with their feature, they delete the project from the server with `takedown`. The server might be local or might be remote. + +**Never:** ALKilnInThePlayground is a package an author can install on their own servers. It is a form the author can use to run ALKiln test on the server as they develop their package[^1]. It gives them a faster iteration cycle. Since it is on their server, it has access to information about the author's account and the packages they have installed on their account. They control creating and deleting the package they want to test. Since they run the ALKiln tests for a package that they've already installed on their server (run), ALKilnInThePlayground doesn't need the setup or takedown steps. + +[^1]: A docassemble server can have multiple purposes. One is to contain the packages for online forms which end users then use. Those packages are stored in a special place on the server and admins configure the server to give end users a list of those forms. Another purpose of the docassemble server is to be a development environment in which to create the forms. An author/developer of the forms has a developer account on the server. That account can store multiple packages and versions of packages. It also has an IDE that authors can use to work on those packages. They can push those files to GitHub from that IDE. They can also pull those packages from GitHub using that IDE. + + +## Considered Options + +- ALKiln could have one script to contain all setup, run, and takedown functionality. +- Internal devs can manually install packages onto servers (local or remote) in whatever way they want using the command prompt to handle setup and takedown separately from running. +- ALKiln could have individual scripts for setup, run, and takedown. GitHub actions would use all three. + +## Decision Outcome + + + diff --git a/docs/decisions/reports_and_logs_2024_08_11.md b/docs/decisions/reports_and_logs_2024_08_11.md index dcce21df..fbe2012b 100644 --- a/docs/decisions/reports_and_logs_2024_08_11.md +++ b/docs/decisions/reports_and_logs_2024_08_11.md @@ -1,114 +1,124 @@ # Shape and behavior of the report and log objects - + + ## Context and scope -We have reports and logs we show to the user in the console and as artifacts. We have logs we use for development. cucumber also has logs we want to include in the above until we are confident we are handling all cases ourselves. +We show logs to the user in the console and as test suite artifacts. We have logs we use for development. cucumberjs also has logs we want to include in the above until we are confident we are handling all cases ourselves. -Right now, the system for logging is fractured and scattered - reports log to the console and save completely separately from other logs. 
Logs that are not from the report don't get saved anywhere. Some logs only get shown in debug mode, which can be frustrating and unhelpful. +Right now, the system for logging is fractured, scattered, and duplicated. Logs that are not from the report don't get saved anywhere. Some logs only get shown in debug mode in the console, which can be frustrating and unhelpful. We lack verbose output information for tests that authors run in the wild. + +This document excludes discussion of reports, but it is useful to note that at the moment they are completely separate from logs and yet save to some of the same files. ## Goals -Overview: - -- Unify the logging and report systems -- Clarify their flow -- Make log information more visible - -Details: - -- Log objects have statuses, icons, log codes (e.g. ALK0105), contextual info (e.g. `setup`, `takedown`, `invalid answer`), timestamps, descriptive text, and data (like full error traces). -- These are the broad purposes of various logs: -1. Setup and takedown messages. -2. Test progress (a compact way to see what's happening/happened in each test): - - Test1: .............F-- - - Test 2: .. -3. A pretty report at the end with details about each test. It has sections, like "failed" and "passed", headings, etc. -4. cucumber's various console logs -5. Lots of other informational logs for internal development and debugging purposes. -I want to do different things with different logs: -- During the tests, always log items 1, 2, 3, and 4 to the console. -- During the tests, save items 2, 3, and 4 to a log as they come with all the info in the log objects - `report_log.txt`/`running_report.txt`/`report_run.txt` maybe. If we have to stop the tests early because of an infinite loop or something, this will at least show some information. -- At the end of the tests, format items 2, 3, and 4, often with only part of the log object info, in a file called `report.txt`. -- During the tests, save all of the items to a `verbose_log.txt` as they happen. -- In debug mode, log all that stuff to the console while the tests are running. -Right now, a few of the logs, like for the test progress (item 2), are printed with `stdout` inline while all others are logged with `console.log`. -- Logs should accept multiple arguments to match console.log/etc. behavior. - -Note: The progress logs printed to the console have color codes. We also may want a plain text version without any colors to save in the report log and show in the Playground[^1] "console" and/or a version converted to HTML that does have styles. The latter may be for something easier to digest in a browser and/or to show visually in the Playground version of the tests. - -Questions: -- Should `log` throw errors? Otherwise everywhere we need to error, we need to use `log` and `throw`, but I am unable to find a way to remove the current function from an error stack/trace. -- How do we capture "errors" that are more subtle? Keep the word "error" in the "context"/"types" list? For example, expected errors: - - Invalid answers on the form that are then assessed as valid behavior? - - `AbortError`s of promises that should indeed have been aborted. -- Should we allow "in-between"s for all parts of the log? All together: pre-everything (new lines, dividers, etc), icon/context/etc., pre-logs, logs (the actual messages to print), pre-data, data, post-data? We can default to nothing for all of those. -- How do we differentiate between methods that log to the console vs. methods that just store data? 
- -[^1]: Don't go down this rabbit hole. Broadly - with the "Playground" version, users see the results as a web page instead of in the GitHub job console or in the artifacts. The users run the tests on their own server. This has to do with the platform we built ALKiln for - [docassemble](https://docassemble.org/). It gives users a faster iteration cycle. +- Make log information persistent and visible +- Add additional meaningful information to logs (e.g. metadata) +- Unify and clarify log flow +- Make log behavior consistent ## Non/anti-goals -- Spend a month on this +- Disrupt the signals we're giving authors about their tests with the noise of internal ALKiln details - Create a complex and heavy system to try to abstract absolutely everything +- Rigidity ## The actual design -This design still seems a little complex and potentially too coupled with reports. +### `Log` internal errors -### System-context diagram +Logs are informational and not related to the behavior of authors' tests. Because of that, these logs should avoid adding noise to what an author sees. That is, internal warnings or errors should look different than authors' test errors. To help with this: -``` -logic code -> logger -> report -----------> console - ^ |-> console |-> stdout/progress - cucumber ---| |-> running report |-> final report.txt - |-> verbose log -``` +- All log methods` arguments have default values. +- We use try/catch very aggressively in the `Log` class. -``` -logic code -> logger ------> report ------> final report.txt - ^ |-> console - cucumber ---| |-> stdout/inline/progress - |-> running report - |-> verbose log -``` +Read more of the thoughts behind this choice in the decision doc on which log level to pick and the design doc on types of errors. -``` - |------------------------------------<| - v | -logic code -> logger -> report -----------> format prettily - ^ |-> console - cucumber ---| |-> running report - |-> verbose log - |-> stdout/progress - |-> final report.txt -``` +### Purposes of methods + +Some of these notes have to do with reports because logs and reports are using some of the same files at the moment and yet are not integrated. We'll write a doc for reports when we integrate reports more fully with logs. + +1. `Log`: Messages or errors from wrapping functionality, like setup and takedown messages. +2. `Log`: Test progress (a compact way to see what's happening/happened in each test): + + Test1: .............F--. + + Test 2: .... +3. Report: A pretty report at the end with details about each test. It has sections, like "failed" and "passed", headings, etc. +4. Report: cucumberjs's various console logs. +5. `Log`: Lots of other informational logs for internal development and debugging purposes. + +Different logs belong in different places: + +- ALKiln always logs items 1, 2, 3, and 4 to the console. +- Ideally save all items in a verbose debug file sequentially as the tests run. +- During the tests, save items 2, 3, and 4 to a running report file sequentially as they come. Include metadata. Suggested filenames: `report_log.txt`/`running_report.txt`/`report_run.txt`. If we have to stop the tests early because of an infinite loop or something, this will at least show the test author some information. +- At the end of the tests, format items 1, 2, 3, and 4 nicely in a file called `report.txt`. This will mostly exclude the metadata. +- When there is a warning-level or error-level log, save 1, 3, 4, and 5 in an unexpected results file too. 
+- In future, discuss the usefulness of having a debug mode where ALKiln logs all items to the console while the tests are running.
+
+We may also want copies of various files that have different formats for different environments (see docs on the different environments in which ALKiln runs).
+
+- Console logs with colors.
+- Plain text versions that exclude those color codes.
+- HTML versions to show with, for example, ALKilnInThePlayground. Maybe in GitHub as well in the future.
+
+### Anatomy and behavior of logging methods
+
+A `Log`'s logging methods add additional metadata to a log:
 
-### APIs
+1. The level of the log (success, info, warning, error, and custom values)
+2. An icon showing the level of a log
+3. A log code (e.g. ALK0105)
+4. Contextual info about where the log originated (e.g. "setup", "takedown", "invalid answer")
+5. A timestamp
 
-Everything saves to the "verbose"/"debug" log. Aside from that:
+To be able to show that metadata, most methods will accept custom values for parts of it. Most methods offer:
 
-`log.info/warning/success/error` methods log to console. This includes, e.g, setup, takedown which show up in GitHub workflow job logs.
+- Log code
+- Contextual info
+- Error-throwing information
 
-`log.report` saves to the running report then uses a report obj focused on cosmetics that has `.heading/table/header/error/etc` methods. Maybe a `.stdout/inlne` method, which then prints to the console immediately.
+For the debug method, everything other than the timestamp can be customized.
 
-`log.verbose/debug` (name?) - Only save to verbose/debug log with log codes, etc.
+Callers can influence the content and formatting of most parts of the log message. For example, they can call `.warn` to show a warning icon and level metadata. The one exception would usually be the part before the metadata. Customizing that could be useful for things like creating a visual separator before ALKiln starts evaluating the fields of a web page. For that reason, the caller can add a `before` property to the metadata, which a `Log` object will insert before the metadata.
 
-### Data storage
+A `Log` object should be able to throw errors so that it can log metadata and any additional styling and context along with the error. It can also make sure that the error will be an actual `Error` object (as opposed to a string or other object). Otherwise, everywhere we needed to throw an error, we would need to duplicate handling bad data (like objects with circular references), using `Log` to save to files, throwing the error, and other possible complications. For that, a `Log` also needs to accept this additional information:
+
+- Whether to throw an error
+- The error to throw
+
+Those methods should also accept multiple individual log arguments the same way `console.log()` does. This lets the caller include strings separately from objects they want to log, including `Error` objects, so that a `Log` object can handle them safely and style them consistently. We could have required a list of logs instead, but the extra syntax was frustrating.
+
+Right now, the only way we differentiate methods that just store data vs. methods that also log to the console is with documentation. This seems less than ideal. Using the name `record` could be confusing because developers might think they need to log to the console separately from saving the information to debug files.
+
+### Report
+
+Unfinished. Will this be integrated with `Log`, and if so, how? Will it just use `Log`?
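+
+One possible shape, sketched very roughly below, is a thin `Report` class that does its own formatting but hands anything it wants saved to a `Log` instance's `.debug()`. The class shapes, method signatures, and file name in this sketch are illustrative assumptions, not the current API:
+
+```js
+// Hypothetical sketch only - Report formats, Log persists.
+const fs = require(`fs`);
+
+// Stand-in for the real `Log`. The only part Report leans on is `.debug()`,
+// which appends to a debug file.
+class Log {
+  constructor({ path = `.` } = {}) { this.path = path; }
+  debug({ context = `` } = {}, ...logs ) {
+    fs.appendFileSync( `${ this.path }/debug_log.txt`, `[${ context }] ${ logs.join(` `) }\n` );
+  }
+}
+
+// Report formats headings, tables, etc. itself, then reuses Log to save them
+class Report {
+  constructor({ log = new Log() } = {}) { this.log = log; }
+  heading( text ) {
+    const formatted = `\n===== ${ text } =====\n`;
+    this.log.debug({ context: `report` }, formatted );
+    return formatted;
+  }
+}
+
+new Report().heading( `Failed scenarios` );
+```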
+ +A report will have different formats for sections, headings, tables, etc. It will also save to debug files and unexpected results files. It will also have a plain text version, an HTML version, and maybe a version with console colors. + +Many of its needs overlap with those of `Log`, so it seems appropriate to avoid duplicating some behavior. If we keep a separate `Report` class, it might use a `Log` instance to use `Log`'s `.debug()`. We may have separate functions that transform output to the 3 formats we want. + +## APIs + +See the `Log` class and its tests for the API. + +## Data storage - `report.txt` - final pretty output for the user with headings, etc. - Each Scenario's `report.txt` - pretty output in each test's folder. -- `running_report_log.txt` (name?) - ugly, but still sparse, giving the user some info about what happened during the tests without overwhelming them. -- `verbose.txt` (name? `debug`?) - very ugly and full of all the information from everywhere else and more. Useful for internal development and troubleshooting. Includes setup and takedown. +- `report_log.txt` (name?) - ugly, but still sparse, giving the user some info about what happened during the tests without overwhelming them. +- `debug_log.txt` (name? `debug`?) - very ugly and full of all the information from everywhere else and more. Useful for internal development and troubleshooting. Includes setup, takedown, etc. +- `temp_unexpected_results_debug_log.txt` - A file containing messages about unexpected behavior, like test warnings and errors, as well as internal ALKiln warnings. We don't output this in our GitHub actions - the file itself is temporary, we just add its contents to the final unexpected results file. All its information exists in the debug file too. +- `unexpected_results.txt` - Logs for failures and other unexpected behavior. It has information about test warnings and errors, as well as internal ALKiln warnings. At first we only save report information there. When the tests finish, we add the contents of the specific debug file that contains only unexpected results. It may be complex to store setup and takedown output in the verbose/debug file. `console.Console` cannot append to a file[^2], so we would have to figure out a work-around. One approach could be to always first copy from the existing verbose file and paste those contents back into the file as the first log. That may take setting up some fallbacks to avoid errors. -[^2]: `fs.appendFile()` only takes strings and doesn't log exactly like the console. I want to log exactly like the console to make it easy to search for words I and others see in the console output. +[^2]: `fs.appendFile()` only takes strings and doesn't log exactly like the console. Logging exactly like the console could make it easy to search for words we see in the console output. -### Code and pseudo-code +## Code and pseudo-code For capturing console output, look into how to get the `stdin` for capturing various console output. https://stackoverflow.com/a/54202970: @@ -125,7 +135,7 @@ stdin.addListener('data', text => { }) ``` -### Degree of constraint +## Degree of constraint I think this can basically be greenfield (designed from scratch). We do have a current system, but I think we can easily convert our existing code to a new system. @@ -136,8 +146,37 @@ I think this can basically be greenfield (designed from scratch). 
We do have a c - For report content, `log` would call to `report`, which would do pretty formatting and then hand the format text back to `log` to be saved and/or printed to the console. I'm not sure how `log` could then know what to do with the output - leave out the codes/etc. when it logs to the console and/or saves to the final report.txt. - Avoid the complexity of storing setup and takedown output in the verbose/debug file. In GitHub, they're in the GitHub console. Locally, we see the results immediately. On the other hand, storing them ourselves lets us look for information in just one place. +### System-context diagram alternatives + +``` +`Log` + +console methods -> ._console() -> .debug() -> stdout/stderr +report and its methods -> .debug() -> eventually stdout/stderr +cucumberjs stdout writes get caught -> .debug() +.stdout() -> .debug() -> sometimes stdout/stderr + +.debug() -> debug_log.txt + -> sometimes unexpected_results.txt + +# unfinished +report and its methods -> report_log.txt (plain text) + -> report summary + -> report.txt (plain text summary) + -> report.html (html summary) + -> final console log (summary with console colors) + -> individual Scenario reports + -> same output formats as report +``` + ## Cross-cutting concerns _Security, privacy, observability, etc._ The same ones we've already dealt with for our current logs. + +## Related documentation + +Decision doc about which log level to use. +Architecture design doc about + diff --git a/docs/errors_and_logs.md b/docs/errors_and_logs.md index 8d57c93b..430ae8d6 100644 --- a/docs/errors_and_logs.md +++ b/docs/errors_and_logs.md @@ -1,6 +1,6 @@ # Errors - + An `Error` instance can mean a lot of different things in the context of this testing framework. They're not all for an author's test failures. @@ -19,4 +19,10 @@ An `Error` instance can mean a lot of different things in the context of this te 1. **ALKiln un-handled bugs**: For example, we have forgotten to define a variable and none of our tests has yet triggered that code. These are unexpected errors that force processes to abort or force individual tests to fail in uninformative ways. To an author that runs into that error, it looks like their test has failed, but the fault is ours and we need to troubleshoot it. We should make that clear. 1. **ALKiln silent bugs**: For example, a debug log has an object that causes an error when ALKiln tries to turn it into a string. ALKiln should record the failure somewhere, but should avoid throwing it. We can troubleshoot those if we see them and add new internal tests. See decisions docs on when to use which kind of log level. -1. **ALKiln expected errors**: For example, one promise won a `Promise.race()`, so ALKiln aborted the other promises. That causes those aborted promises to error. ALKiln saves the "informational" Error to the debug files and it has a level of "info" or "debug". +1. **ALKiln expected errors**: For example, one promise won a `Promise.race()`, so ALKiln aborted the other promises. That causes those aborted promises to error. That is why ALKiln saves the "informational" Error to the debug files and it has a level of "info" or "debug". + +## ALKiln's internal tests + +1. **ALKiln internal success-handling test failures**: For example, the test fails to fill out radio buttons correctly. +1. **ALKiln internal failure-handling test successes**: For example, we test that the summary report shows a specific log code when an interview fails to load and the log code shows up correctly. 
The "test" fails - the interview fails to load - but *our* test passes - the right log code was in the summary report. +1. **ALKiln internal failure-handling test failures**: For example, we test that the report shows a specific log code when an interview fails to load, but the log code is missing. The "test" fails and *our* test fails. diff --git a/docs/setup_run_and_takedown.md b/docs/setup_run_and_takedown.md new file mode 100644 index 00000000..c6951e00 --- /dev/null +++ b/docs/setup_run_and_takedown.md @@ -0,0 +1,87 @@ +# Setup, run_cucumber, and takedown + +## Context and scope + + + + + + + + + +### Degree of constraint + +**`Log` and debug files:** + +One test suite's `Log` needs to save debug logs to specific files. `Log` needs to know what folder it should store those files in. + +The 3 processes the GitHub action uses are completely separate javascript runtimes, so they will each have their own `Log` instance. They are all in the same GitHub action, though, so the GitHub action can create the folder name and give it to each process[^2]. We build `Log` to account for this. + +On the other hand, the folder name has a very specific date format. It's one we use other places too. Duplicating creating the exact same date format in multiple places - the GitHub action, ALKilnInThePlayground, a local internal developer's actions, and in ALKiln's test-running code - would make maintenance harder. + +Right now, `Log` handles 2 cases: getting an existing folder name to use vs. getting no folder name. + +[^2]: Basically, they are people separated by huge cliffs and can't just hand each other a tool. They have to yell instructions across the gap about how the next person can make their own tool. + +### APIs + + + +## Alternatives considered + +Alternative to managing artifacts folder name, including dates, in multiple places: + +Make a script that saves the folder name to the ALKiln `runtime_config.json`. Then everyone, for example, `Log`, would always use `runtime_config.json`. All processes would use that script first. That could be in `setup` script, in the ALKilnInThePlayground, and potentially in the `package.json` script that developers usually use. Developers that customize their setup and takedown behavior would have to do this by hand. Is that really less complicated? It at least adds another point where we go back and forth between the caller (GitHub action, ALKilnInThePlayground, and local scripts) and the javascript. + +Alternative script names: `install_package`, `delete_package`. + + +## Cross-cutting concerns + + + +## Details + +### Environments in which ALKiln runs + +**Always:** GitHub actions always need to install the package onto a server. There are two kinds of ways to run ALKiln in GitHub hidden under this umbrella. Both ways copy the current branch's code and then install the package on a server (setup). + +Situation 1: One action installs the package on the server itself (a temporary docassemble server hosted on GitHub). + +Situation 2: The other installs in an individual author's account (their Playground) on the docassemble server[^1]. + +Then our GitHub action runs the tests using that installed package (run). When the test suite is done, the GitHub action either destroys the temporary server (situation 1) or just deletes the package from the author's server (takedown). + +**Sometimes:** Internal developers of ALKiln install the feature branch they're working on onto a server (install). They run tests multiple times using that installed package (many runs). 
When they're done with their feature, they delete the project from the server (takedown). The server might be local or might be remote. + +**Never:** ALKilnInThePlayground is a package an author can install on their own servers. It is a form the author can use to run ALKiln test on the server as they develop their package[^1]. It gives them a faster iteration cycle. Since it is on their server, it has access to information about the author's account and the packages they have installed on their account. They control creating and deleting the package they want to test. Since they run the ALKiln tests for a package that they've already installed on their server (run), ALKilnInThePlayground doesn't need the setup or takedown steps. + +[^1]: A docassemble server can have multiple purposes. One is to contain the packages for online forms which end users then use. Those packages are stored in a special place on the server and admins configure the server to give end users a list of those forms. Another purpose of the docassemble server is to be a development environment in which to create the forms. An author/developer of the forms has a developer account on the server. That account can store multiple packages and versions of packages. It also has an IDE that authors can use to work on those packages. They can push those files to GitHub from that IDE. They can also pull those packages from GitHub using that IDE. + + diff --git a/lib/run_cucumber.js b/lib/run_cucumber.js index d4b87498..ecb74839 100755 --- a/lib/run_cucumber.js +++ b/lib/run_cucumber.js @@ -172,22 +172,28 @@ async function main() { ); } - // Include debug info at the bottom of the unexpected results file if needed - let debug_unexpected_content = ``; + /** Include debug info at the bottom of the unexpected results file if + * needed. Avoid creating the file if its not necessary. */ + let temp_debug_unexpected_content = ``; try { - debug_unexpected_content = fs.readFileSync(`${ log.path }/${ log.debug_unexpected_filename }`); - } catch ( error_reading_debug_unexpected ) { - // It's ok if this file doesn't exist - } - if ( debug_unexpected_content !== `` ) { - fs.appendFileSync( - `${ log.path }/${ log.unexpected_filename }`, - `\n\n\n\n\n\n\n\n -≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡ + temp_debug_unexpected_content = fs.readFileSync(`${ log.path }/${ log.temp_unexpected_debug_log_filename }`); + } catch ( ignore_err ) { /* Missing debug info is ok */ } + + if ( temp_debug_unexpected_content !== `` ) { + // Add spacing before the debug content if file already has content + try { + fs.readFileSync(`${ log.path }/${ log.unexpected_filename }`); // Does it have contents? 
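+      // If the file doesn't exist yet, readFileSync above throws, we land in
+      // the catch below, and this spacing gets skipped.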
+ fs.appendFileSync(`${ log.path }/${ log.unexpected_filename }`, `\n\n\n\n\n` ); + } catch ( ignore_err ) { /* Missing unexpected file is ok */ } + + // Add the heading for debug content + fs.appendFileSync( `${ log.path }/${ log.unexpected_filename }`, +`≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡ 🐛 Full DEBUG info (some repeated content) 🐛 ≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡\n\n` ); - fs.appendFileSync(`${ log.path }/${ log.unexpected_filename }`, debug_unexpected_content ); + // Add the debug content itself + fs.appendFileSync(`${ log.path }/${ log.unexpected_filename }`, temp_debug_unexpected_content ); } } catch ( writing_cucumber_error ) { diff --git a/lib/steps.js b/lib/steps.js index a0b4be48..317e4295 100644 --- a/lib/steps.js +++ b/lib/steps.js @@ -112,8 +112,8 @@ BeforeAll(async function() { scope.paths.debug_log = `${ scope.paths.artifacts }/${ log.debug_log_filename }`; scope.paths.report_log = `${ scope.paths.artifacts }/${ log.report_log_filename }`; scope.paths.report = `${ scope.paths.artifacts }/${ log.report_filename }`; - scope.paths.debug_unexpected = `${ scope.paths.artifacts }/${ log.debug_unexpected_filename }`; - scope.paths.failures = `${ scope.paths.artifacts }/${ log.unexpected_filename }`; + scope.paths.debug_of_unexpected = `${ scope.paths.artifacts }/${ log.temp_unexpected_debug_log_filename }`; + scope.paths.report_of_unexpected = `${ scope.paths.artifacts }/${ log.unexpected_filename }`; reports.create( scope ); }); diff --git a/lib/utils/log.js b/lib/utils/log.js index fb749f05..1e491a30 100644 --- a/lib/utils/log.js +++ b/lib/utils/log.js @@ -48,7 +48,7 @@ class Log { // Debug filenames debug_log_filename = `debug_log.txt`; // Should always exist - debug_unexpected_filename = `debug_unexpected_results.txt`; // Sometimes exists + temp_unexpected_debug_log_filename = `temp_unexpected_results_debug_log.txt`; // Sometimes exists unexpected_filename = `unexpected_results.txt`; // Only created if needed report_log_filename = `report_log.txt`; // Not integrated, but used // Regular report filename @@ -300,6 +300,10 @@ class Log { do_throw = false, } = debug_opts; + // "warning" level and icon for for internal devs we use to avoid disrupting + // the info authors are looking for + if ( level === `note` ) { icon = `🖊️`; } + let formatted = `Skipped formatting all logs.`; try { @@ -458,14 +462,14 @@ class Log { * @returns { string } The formatted log. * * @example - * // Quotes are to make the surrounding white space more clear + * // Backticks are to make the surrounding white space more clear * log._stringify_inline({ prev_line_type: null, inline_log: "apple" }); - * // "apple" + * // `apple` * log._stringify_inline({ prev_line_type: 'inline', inline_log: "apple" }); - * // " apple" + * // ` apple` * log._stringify_inline({ prev_line_type: 'block', inline_log: "apple" }); - * // - * // apple + * // ` + * // apple` * */ // An inline log's string starts with nothing if it's the first string... let start = ``; @@ -557,6 +561,13 @@ class Log { } // Ends _combine_logs_and_possible_error() _get_Error_safely({ maybe_error }) { + /** Given anything, return an Error. Avoid throwing any errors. + * + * @param { Object } opts + * @param { * } opts.maybe_error - Optional. Can be anything. 
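+     * @example
+     * // Illustrative: whatever gets passed in, the result is an `Error` instance
+     * const err = log._get_Error_safely({ maybe_error: `not an Error` });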
+ * + * @returns { Error } + * */ if ( maybe_error instanceof Error ) { return maybe_error; } else { @@ -793,7 +804,7 @@ class Log { } try { - fs.appendFileSync( `${ this.path }/${ this.debug_log_filename }`, `\n` + text ); + fs.appendFileSync( `${ this.path }/${ this.debug_log_filename }`, text + `\n` ); } catch ( fs_append_error ) { // If this is our first time failing to write to the debug log, console // warn about it. @@ -804,7 +815,7 @@ class Log { try { if ( unexpected ) { - this._save_to_unexpected({ text }); + this._save_to_temp_unexpected_file({ text }); } } catch ( error_sending_to_unexpected ) { // Do nothing. These values will be in the debug log @@ -830,7 +841,7 @@ class Log { } } - _save_to_unexpected({ text = `` } = {}) { + _save_to_temp_unexpected_file({ text = `` } = {}) { /** Save to the unexpected results file. Intended for errors and warnings. * Catch ANY error. * @@ -841,9 +852,9 @@ class Log { * @returns { string } The same value as given. * */ try { - fs.appendFileSync(`${ this.path }/${ this.debug_unexpected_filename }`, `\n${ text }` ); + fs.appendFileSync(`${ this.path }/${ this.temp_unexpected_debug_log_filename }`, `${ text }\n` ); } catch ( error_saving_to_unexpected_results_file ) { - // Do nothing. We should get the data in the debug file or console. + // Do nothing. At worst, we'll get the data in the debug file or console . } return text; } diff --git a/lib/utils/reports.js b/lib/utils/reports.js index f2a19430..ff9adf79 100644 --- a/lib/utils/reports.js +++ b/lib/utils/reports.js @@ -121,24 +121,63 @@ reports.saveFinalScenarioData = function( scope, { status=`in progress` }) { this.setReportScenarioStatus( scope, { status }); let scenario = scope.report.get( scope.scenario_id ); let report = this.getPrintableScenario( scenario ); - if ( scenario.status === `FAILED` ) { this.failures.push( report ); } - else if ( scenario.status === `PASSED` ) { this.successes.push( report ); } - // else if ( scenario.status === `WARNING` ) { passed_reports += report; this.warnings.push( report ); } - // else if ( scenario.status === `UNDEFINED` ) { passed_reports += report; this.undefineds.push( report ); } - else { this.others.push( report ); } - - // Save to unexpected results files (worry about optimization later) - if ( this.failures.length > 1 || this.others.length > 1 ) { - let unexpected_results = this.title + this.getPrintableUnexpected( scope ); - fs.writeFileSync( scope.paths.failures, unexpected_results ); - fs.appendFileSync( scope.paths.debug_unexpected, unexpected_results ); + if ( scenario.status === `PASSED` ) { + this.successes.push( report ); + } else { + this._save_unexpected( scope, { status: scenario.status, report }); } + // if ( scenario.status === `FAILED` ) { + // this.failures.push( report ); + // } + // else if ( scenario.status === `PASSED` ) { this.successes.push( report ); } + // // else if ( scenario.status === `WARNING` ) { passed_reports += report; this.warnings.push( report ); } + // // else if ( scenario.status === `UNDEFINED` ) { passed_reports += report; this.undefineds.push( report ); } + // else { this.others.push( report ); } + + // // Save to unexpected results files (worry about optimization later) + // if ( this.failures.length > 1 || this.others.length > 1 ) { + // let unexpected_results = this.title + this.getPrintableUnexpected( scope ); + // fs.writeFileSync( scope.paths.report_of_unexpected, unexpected_results ); + // fs.appendFileSync( scope.paths.debug_of_unexpected, unexpected_results ); + // } + // Save to reports.txt 
(worry about optimization later) fs.writeFileSync( scope.paths.report, this.getPrintableReport( scope ) ); return scenario; }; // Ends reports.saveFinalScenarioData() +reports._save_unexpected = function(scope, { status, report }) { + /** Store the unexpected report and write to the unexpected report file. + * + * @param {Object} scope - State and methods of the current tests + * @param {Object} data + * @param {Object} data.status - Status of the current scenario + * @param {string} data.report - Report for the current scenario + * */ + if ( status === `FAILED` ) { + this.failures.push( report ); + // } else if ( status === `WARNING` ) { passed_reports += report; this.warnings.push( report ); + // } else if ( status === `UNDEFINED` ) { passed_reports += report; this.undefineds.push( report ); } + } else { + this.others.push( report ); + } + + // If it's our first unexpected result, add the title to the top of the page + let already_unexpected = ``; + try { already_unexpected = fs.readFileSync( scope.paths.report_of_unexpected ); } + catch ( ignore_non_critical_error ) {} + if ( already_unexpected === `` ) { + try { fs.appendFileSync( scope.paths.report_of_unexpected, this.title ); } + catch ( ignore_non_critical_error ) {} + } + + try { fs.appendFileSync( scope.paths.report_of_unexpected, report ); } + catch ( ignore_non_critical_error ) {} + + return this; +} + reports.progress = function ({ logs=[] }) { // TODO: save stdout-style to report_log, object, and getting printable scenario }; @@ -230,7 +269,7 @@ let getDebugLine = function ({ type=``, code=`ALK00rd`, value=`` }) { let start = ``; // Get all the relevant emoji - if ( type.includes(`debug`) ) { start += `🐞`; } + if ( type.includes(`debug`) ) { start += `🐛`; } if ( type.includes(`info`) ) { start += `💡`; } if ( type.includes(`success`) ) { start += `🌈`; } if ( type.includes(`warning`) ) { start += `🔎`; } diff --git a/tests/unit_tests/log.test.js b/tests/unit_tests/log.test.js index ad770542..6ee8251f 100644 --- a/tests/unit_tests/log.test.js +++ b/tests/unit_tests/log.test.js @@ -66,7 +66,7 @@ describe(`An instance of log`, function () { } catch ( error ) { // Do nothing, go on to test the files } - const exists = fs.existsSync(`${ temp_log3.path }/${ temp_log3.debug_unexpected_filename }`); + const exists = fs.existsSync(`${ temp_log3.path }/${ temp_log3.temp_unexpected_debug_log_filename }`); expect( exists ).to.be.true; // Clean up by deleting the folder fs.rmSync(temp_log_path, { recursive: true, force: true }); @@ -546,11 +546,11 @@ function expect_debug_file_to_not_include( text ) { function expect_unexpected_output_file_to_include( text ) { - let from_file = fs.readFileSync(`${path}/${log.debug_unexpected_filename}`, 'utf8'); + let from_file = fs.readFileSync(`${path}/${log.temp_unexpected_debug_log_filename}`, 'utf8'); expect( from_file ).to.include( text ); }; function expect_unexpected_output_file_to_not_include( text ) { - let from_file = fs.readFileSync(`${path}/${log.debug_unexpected_filename}`, 'utf8'); + let from_file = fs.readFileSync(`${path}/${log.temp_unexpected_debug_log_filename}`, 'utf8'); expect( from_file ).to.not.include( text ); };