All Downloads are FREE. Search and download functionalities use the official Maven repository.

freemarker.outcomes-with-result.ftl Maven / Gradle / Ivy




    

    
    

    Serenity Reports

    <#include "libraries/favicon.ftl">

    <#include "libraries/common.ftl">
    <#include "libraries/jquery-ui.ftl">
    <#include "libraries/datatables.ftl">

    <#include "components/tag-list.ftl">
    <#include "components/test-outcomes.ftl">


<#-- Test-count metrics used by the summary table and charts below. -->
<#assign manualTests = testOutcomes.count("manual")>
<#assign automatedTests = testOutcomes.count("automated")>
<#-- BUG FIX: totalTests was assigned the *automated* count (a copy-paste of the
     line above), so any "total" figure derived from it silently excluded manual
     tests. "ANY" counts tests of every type.
     NOTE(review): "ANY" matches Serenity's TestType enum (ANY/MANUAL/AUTOMATED)
     — confirm against the serenity-core version in use. -->
<#assign totalTests = testOutcomes.count("ANY")>

<#-- Per-result data series and percentage labels feeding the results graph. -->
<#assign testResultData = resultCounts.byTypeFor("success","pending","ignored","skipped","failure","error","compromised") >
<#assign testLabels = resultCounts.percentageLabelsByTypeFor("success","pending","ignored","skipped","failure","error","compromised") >
<#assign graphType="automated-and-manual-results"/>

<#-- Flags: whether at least one manual test exists with each result type. -->
<#assign successfulManualTests = (manualTests.withResult("SUCCESS") > 0)>
<#assign pendingManualTests = (manualTests.withResult("PENDING") > 0)>
<#assign ignoredManualTests = (manualTests.withResult("IGNORED") > 0)>
<#assign failingManualTests = (manualTests.withResult("FAILURE") > 0)>

    



<#-- Page heading: project name and subtitle from report configuration. -->
${reportOptions.projectName} ${reportOptions.projectSubTitle}
<#-- Builds pageTitle/resultsContext from the current tag (if any).
     NOTE(review): this file appears to have been scraped from a web page — the
     surrounding HTML and the closing </#if> directives have been stripped, so
     the conditional structure below is unbalanced and cannot be restored from
     this view alone; recover the original from the serenity-core jar. -->
<#assign tagsTitle = 'Related Tags' > <#if (testOutcomes.label == '')> <#assign resultsContext = ''> <#assign pageTitle = 'Test Results: All Tests' > <#else> <#assign resultsContext = '> ' + testOutcomes.label> <#if (currentTagType! != '')> <#assign pageTitle = " " + inflection.of(currentTagType!"").asATitle() + ': ' + inflection.of(testOutcomes.label).asATitle() > <#else> <#assign pageTitle = inflection.of(testOutcomes.label).asATitle() >
<#--
-->
<#-- Breadcrumb trail: Home > parent tag/requirement > current label. -->
Home <#if (parentTag?has_content && parentTag.name! != '')> <#assign titleContext = " (with " + inflection.of(parentTag.type!"").asATitle() + " " + inflection.of(parentTag.name!"").asATitle() + ")" > <#else> <#assign titleContext = "" > <#if (breadcrumbs?has_content)> <#list breadcrumbs as breadcrumb> <#assign breadcrumbReport = absoluteReportName.forRequirementOrTag(breadcrumb) /> <#assign breadcrumbTitle = inflection.of(breadcrumb.shortName).asATitle() > <#assign breadcrumbType = inflection.of(breadcrumb.type).asATitle() > > <#--${formatter.htmlCompatible(breadcrumbTitle)}--> ${formatter.htmlCompatibleStoryTitle(breadcrumbTitle)} <#else> <#if currentTagType?has_content> > ${inflection.of(currentTagType!"").asATitle()} ${titleContext} <#if testOutcomes.label?has_content> <#--> ${formatter.truncatedHtmlCompatible(inflection.of(testOutcomes.label).asATitle(),60)}--> > ${formatter.htmlCompatibleStoryTitle(inflection.of(testOutcomes.label).asATitle())}
<#include "menu.ftl"> <@main_menu selected="home" />

${pageTitle}

<#-- Scenario-count summary line, e.g. "3 failing tests across 10 scenarios". -->
<#assign scenarioLabel = inflection.of(testOutcomes.totalTestScenarios).times("scenario").inPluralForm().toString() > ${testOutcomes.totalMatchingScenarios} ${testOutcomes.resultTypeLabel} across ${testOutcomes.totalTestScenarios} ${scenarioLabel} <#if (csvReport! != '')> | <#if testOutcomes.resultFilterName != 'SUCCESS'>

Note that results include data-driven scenarios containing ${testOutcomes.resultTypeLabel} , which may also contain results other than ${testOutcomes.resultTypeLabel} .

<#-- Per-result report links and scenario counts for the summary table.
     brokenCount = failures + errors; badTestCount additionally includes
     compromised tests and gates the "Test Failure Overview" section below. -->
<#assign successReport = reportName.withPrefix(currentTag).forTestResult("success") > <#assign brokenReport = reportName.withPrefix(currentTag).forTestResult("broken") > <#assign failureReport = reportName.withPrefix(currentTag).forTestResult("failure") > <#assign errorReport = reportName.withPrefix(currentTag).forTestResult("error") > <#assign compromisedReport = reportName.withPrefix(currentTag).forTestResult("compromised") > <#assign pendingReport = reportName.withPrefix(currentTag).forTestResult("pending") > <#assign skippedReport = reportName.withPrefix(currentTag).forTestResult("skipped") > <#assign ignoredReport = reportName.withPrefix(currentTag).forTestResult("ignored") > <#assign totalCount = testOutcomes.totalScenarios.total > <#assign successCount = testOutcomes.totalScenarios.withResult("success") > <#assign pendingCount = testOutcomes.totalScenarios.withResult("pending") > <#assign ignoredCount = testOutcomes.totalScenarios.withResult("ignored") > <#assign skippedCount = testOutcomes.totalScenarios.withResult("skipped") > <#assign failureCount = testOutcomes.totalScenarios.withResult("failure") > <#assign errorCount = testOutcomes.totalScenarios.withResult("error") > <#assign brokenCount = failureCount + errorCount > <#assign compromisedCount = testOutcomes.totalScenarios.withResult("compromised") > <#assign badTestCount = failureCount + errorCount + compromisedCount> <#if testOutcomes.haveFlags()> | <#list testOutcomes.flags as flag> <#assign flagTitle = inflection.of(flag.message).inPluralForm().asATitle() > <#assign flagTag = "flag_${inflection.of(flag.message).asATitle()}" > <#assign flagReport = reportName.forTag(flagTag) > <#assign flagCount = testOutcomes.flagCountFor(flag)> ${flagTitle} (${flagCount})
<#if testOutcomes.total != 0>
<#-- NOTE(review): the run of <#if>/<#else> directives below has no matching
     </#if> anywhere in this file — the closing directives (and the HTML table
     markup they wrapped) were stripped when this page was scraped. The original
     template must be recovered from the serenity-core distribution. -->
<#if resultCounts.hasManualTests() > <#if (resultCounts.getOverallTestCount("success") != 0)> <#else> <#if resultCounts.hasManualTests() > <#if (resultCounts.getOverallTestCount("pending") != 0)> <#else> <#if resultCounts.hasManualTests() > <#if (resultCounts.getOverallTestCount("ignored") != 0)> <#else> <#if resultCounts.hasManualTests() > <#if (resultCounts.getOverallTestCount("skipped") != 0)> <#else> <#if resultCounts.hasManualTests() > <#if resultCounts.hasManualTests() > <#if (resultCounts.getOverallTestsCount("failure","error","compromised") != 0)> <#else> <#else> <#if (resultCounts.getOverallTestsCount("failure","error","compromised") != 0)> <#else> <#if (resultCounts.getOverallTestCount("failure") != 0)> <#else> <#if resultCounts.hasManualTests() > <#if (resultCounts.getOverallTestCount("error") != 0)> <#else> <#if resultCounts.hasManualTests() > <#if (resultCounts.getOverallTestCount("compromised") != 0)> <#else> <#if resultCounts.hasManualTests() > <#if resultCounts.hasManualTests() >
<#-- Summary table rows: automated / manual / overall counts and percentage
     labels for each result type. -->
Scenarios Automated Manual Total
 Passing  Passing ${resultCounts.getAutomatedTestCount("success")} ${resultCounts.getAutomatedTestPercentageLabel("success")}${resultCounts.getManualTestCount("success")} ${resultCounts.getManualTestPercentageLabel("success")} ${resultCounts.getOverallTestCount("success")} ${resultCounts.getOverallTestPercentageLabel("success")}
 Pending  Pending ${resultCounts.getAutomatedTestCount("pending")} ${resultCounts.getAutomatedTestPercentageLabel("pending")}${resultCounts.getManualTestCount("pending")} ${resultCounts.getManualTestPercentageLabel("pending")} ${resultCounts.getOverallTestCount("pending")} ${resultCounts.getOverallTestPercentageLabel("pending")}
 Ignored  Ignored ${resultCounts.getAutomatedTestCount("ignored")} ${resultCounts.getAutomatedTestPercentageLabel("ignored")}${resultCounts.getManualTestCount("ignored")} ${resultCounts.getManualTestPercentageLabel("ignored")} ${resultCounts.getOverallTestCount("ignored")} ${resultCounts.getOverallTestPercentageLabel("ignored")}
 Skipped  Skipped ${resultCounts.getAutomatedTestCount("skipped")} ${resultCounts.getAutomatedTestPercentageLabel("skipped")}${resultCounts.getManualTestCount("skipped")} ${resultCounts.getManualTestPercentageLabel("skipped")} ${resultCounts.getOverallTestCount("skipped")} ${resultCounts.getOverallTestPercentageLabel("skipped")}
 Unsuccessful  Unsuccessful  Unsuccessful  Unsuccessful
 Failed  Failed ${resultCounts.getAutomatedTestCount("failure")} ${resultCounts.getAutomatedTestPercentageLabel("failure")}${resultCounts.getManualTestCount("failure")} ${resultCounts.getManualTestPercentageLabel("failure")} ${resultCounts.getOverallTestCount("failure")} ${resultCounts.getOverallTestPercentageLabel("failure")}
 Broken  Broken ${resultCounts.getAutomatedTestCount("error")} ${resultCounts.getAutomatedTestPercentageLabel("error")}${resultCounts.getManualTestCount("error")} ${resultCounts.getManualTestPercentageLabel("error")} ${resultCounts.getOverallTestCount("error")} ${resultCounts.getOverallTestPercentageLabel("error")}
 Compromised  Compromised ${resultCounts.getAutomatedTestCount("compromised")} ${resultCounts.getAutomatedTestPercentageLabel("compromised")}${resultCounts.getManualTestCount("compromised")} ${resultCounts.getManualTestPercentageLabel("compromised")} ${resultCounts.getOverallTestCount("compromised")} ${resultCounts.getOverallTestPercentageLabel("compromised")}
Total ${resultCounts.getTotalAutomatedTestCount()} ${resultCounts.getTotalManualTestCount()} ${resultCounts.getTotalOverallTestCount()}
<#-- Failure overview: only rendered when there is at least one failed, broken
     or compromised scenario (badTestCount assigned earlier in this template).
     NOTE(review): the matching </#if>/</#list> directives were stripped when
     this page was scraped; structure below is incomplete. -->
<#if badTestCount != 0>

Test Failure Overview

Most Frequent Failures

<#-- One row per recurring failure: result icon, failure name, occurrence count. -->
<#list frequentFailures as frequentFailure>
${frequentFailure.resultIcon} ${frequentFailure.name} ${frequentFailure.count}

Most Unstable Features

<#-- One row per feature, with its failure rate as a percentage. -->
<#list unstableFeatures as unstableFeature>
${unstableFeature.name} ${unstableFeature.failurePercentage}%
<#-- Related-tags section, grouped by tag type, shown only when tags exist. -->
<#if tagResults?has_content >

Tags

<#list tagResults as tagResultGroup >
<#if tagResultGroup.tagType?has_content>
${inflection.of(tagResultGroup.tagType).asATitle()}

Automated Tests

<#-- Automated test table: one row per scenario (parent, title, example count,
     steps, start time, duration, result icon + result).
     NOTE(review): closing </#if>/</#list> directives and the table markup were
     stripped when this page was scraped; structure below is incomplete. -->
<#if (automatedTestCases?has_content)> <#list automatedTestCases as scenario> <#assign outcome_icon = formatter.resultIcon().forResult(scenario.result) />
${leafRequirementType} Scenario Steps Started Duration Result
<#if scenario.parentName?has_content> ${scenario.parentName} ${scenario.title} <#if scenario.hasExamples() > (${scenario.numberOfExamples}) ${scenario.stepCount} ${scenario.formattedStartTime} ${scenario.formattedDuration} ${outcome_icon} ${scenario.result}
<#else> No automated tests were executed

Manual Tests

<#-- Manual test table: same layout minus start time and duration. -->
<#if (manualTestCases?has_content)> <#list manualTestCases as scenario> <#assign outcome_icon = formatter.resultIcon().forResult(scenario.result) />
${leafRequirementType} Scenario Steps Result
<#if scenario.parentName?has_content> ${scenario.parentName} ${scenario.title} <#if scenario.hasExamples() > (${scenario.numberOfExamples}) ${scenario.stepCount} ${outcome_icon} ${scenario.result}
<#else> No manual tests were recorded
<#-- Footer: Serenity version, defaulting when the build number is unset. -->
Serenity BDD version ${serenityVersionNumber!"SNAPSHOT-BUILD"}




© 2015 - 2024 Weber Informatics LLC | Privacy Policy