All downloads are free. The search and download functionalities use the official Maven repository.

freemarker.home.ftl Maven / Gradle / Ivy




    

    
    

    Serenity Reports

    <#include "libraries/favicon.ftl">

    <#include "libraries/common.ftl">
    <#include "libraries/jquery-ui.ftl">
    <#include "libraries/datatables.ftl">

    <#include "components/tag-list.ftl">
    <#include "components/test-outcomes.ftl">


    <#-- Pre-computed test counts used by the summary table and result charts below. -->
    <#assign manualTests = testOutcomes.count("manual")>
    <#assign automatedTests = testOutcomes.count("automated")>
    <#-- Fix: totalTests was previously assigned the automated count (copy-paste of the
         line above); "any" counts manual and automated tests together (Serenity TestType). -->
    <#assign totalTests = testOutcomes.count("any")>

    <#-- Per-result data series and percentage labels for the results chart. -->
    <#assign testResultData = resultCounts.byTypeFor("success","pending","ignored","skipped","failure","error","compromised") >
    <#assign testLabels = resultCounts.percentageLabelsByTypeFor("success","pending","ignored","skipped","failure","error","compromised") >
    <#assign graphType="automated-and-manual-results"/>

    <#-- Flags: true when at least one manual test exists with the given result. -->
    <#assign successfulManualTests = (manualTests.withResult("SUCCESS") > 0)>
    <#assign pendingManualTests = (manualTests.withResult("PENDING") > 0)>
    <#assign ignoredManualTests = (manualTests.withResult("IGNORED") > 0)>
    <#assign failingManualTests = (manualTests.withResult("FAILURE") > 0)>

    



<#-- NOTE(review): the HTML markup around these directives appears to have been stripped
     from this copy of the template; directives are preserved byte-for-byte below. -->
<#-- Page banner: project name and subtitle from the report configuration. -->
${reportOptions.projectName} ${reportOptions.projectSubTitle}
<#-- Derive the page title: "All Tests" when no tag label is selected, otherwise a title
     built from the current tag type (if any) and the tag label. -->
<#assign tagsTitle = 'Related Tags' > <#if (testOutcomes.label == '')> <#assign resultsContext = ''> <#assign pageTitle = 'Test Results: All Tests' > <#else> <#assign resultsContext = '> ' + testOutcomes.label> <#if (currentTagType! != '')> <#assign pageTitle = " " + inflection.of(currentTagType!"").asATitle() + ': ' + tagInflector.ofTag(currentTagType!"", testOutcomes.label).toFinalView() > <#else> <#assign pageTitle = inflection.of(testOutcomes.label).asATitle() >
<#--
-->
<#-- Breadcrumb trail: "Home", then one link per requirement/tag breadcrumb, ending with
     the current tag type or test-outcome label. A parent tag (when present) is folded
     into the title context as "(with <Type> <Name>)". -->
Home <#if (parentTag?has_content && parentTag.name! != '')> <#assign titleContext = " (with " + inflection.of(parentTag.type!"").asATitle() + " " + inflection.of(parentTag.name!"").asATitle() + ")" > <#else> <#assign titleContext = "" > <#if (breadcrumbs?has_content)> <#list breadcrumbs as breadcrumb> <#assign breadcrumbReport = absoluteReportName.forRequirementOrTag(breadcrumb) /> <#assign breadcrumbTitle = formatter.renderTitle(inflection.of(breadcrumb.shortName).asATitle()) > <#assign breadcrumbType = inflection.of(breadcrumb.type).asATitle() > > <#--${formatter.htmlCompatible(breadcrumbTitle)}--> ${formatter.htmlCompatibleStoryTitle(breadcrumbTitle)} <#else> <#if currentTagType?has_content> > ${inflection.of(currentTagType!"").asATitle()} ${formatter.renderTitle(titleContext)} <#if testOutcomes.label?has_content> <#--> ${formatter.truncatedHtmlCompatible(inflection.of(testOutcomes.label).asATitle(),60)}--> > <#-- ${formatter.htmlCompatibleStoryTitle(formatter.renderTitle(inflection.of(testOutcomes.label).asATitle()))}--> ${formatter.htmlCompatibleStoryTitle(formatter.renderHeaders(inflection.of(testOutcomes.label).asATitle()))}
<#-- Optional custom fields: render each configured field name with its value. -->
<#if (customFields?has_content) && (customFields?size > 0) >
<#list customFields as customField> <#list customFieldValues as customFieldValue>
${customField}
${customFieldValue}
<#-- Main navigation menu, with the "home" entry selected. -->
<#include "menu.ftl"> <@main_menu selected="home" />

${pageTitle}

<#-- Run summary line: total scenario count (plus data-row count for data-driven tests),
     an optional CSV download link, per-result report names and scenario counts, and a
     link per flag when flagged tests are present. -->
${testOutcomes.total} test scenarios <#if (testOutcomes.hasDataDrivenTests())> (including ${testOutcomes.totalDataRows} rows of test data) <#if (csvReport! != '')> | <#assign successReport = reportName.withPrefix(currentTag).forTestResult("success") > <#assign brokenReport = reportName.withPrefix(currentTag).forTestResult("broken") > <#assign failureReport = reportName.withPrefix(currentTag).forTestResult("failure") > <#assign errorReport = reportName.withPrefix(currentTag).forTestResult("error") > <#assign compromisedReport = reportName.withPrefix(currentTag).forTestResult("compromised") > <#assign pendingReport = reportName.withPrefix(currentTag).forTestResult("pending") > <#assign skippedReport = reportName.withPrefix(currentTag).forTestResult("skipped") > <#assign ignoredReport = reportName.withPrefix(currentTag).forTestResult("ignored") > <#assign totalCount = testOutcomes.totalScenarios.total > <#assign successCount = testOutcomes.totalScenarios.withResult("success") > <#assign pendingCount = testOutcomes.totalScenarios.withResult("pending") > <#assign ignoredCount = testOutcomes.totalScenarios.withResult("ignored") > <#assign skippedCount = testOutcomes.totalScenarios.withResult("skipped") > <#assign failureCount = testOutcomes.totalScenarios.withResult("failure") > <#assign errorCount = testOutcomes.totalScenarios.withResult("error") > <#assign brokenCount = failureCount + errorCount > <#assign compromisedCount = testOutcomes.totalScenarios.withResult("compromised") > <#assign badTestCount = failureCount + errorCount + compromisedCount> <#if testOutcomes.haveFlags()> | <#list testOutcomes.flags as flag> <#assign flagTitle = inflection.of(flag.message).inPluralForm().asATitle() > <#assign flagTag = "flag_${inflection.of(flag.message).asATitle()}" > <#assign flagReport = reportName.forTag(flagTag) > <#assign flagCount = testOutcomes.flagCountFor(flag)> ${flagTitle} (${flagCount})
<#-- Only render the results table when there is at least one test. -->
<#if testOutcomes.total != 0>
<#-- Visibility switches for the table rows below: each result row is shown when manual
     tests exist or the overall count for that result is non-zero. (Closing tags for
     these <#if>s were carried by the stripped HTML markup.) -->
<#if resultCounts.hasManualTests() > <#if (resultCounts.getOverallTestCount("success") != 0)> <#else> <#if resultCounts.hasManualTests() > <#if (resultCounts.getOverallTestCount("pending") != 0)> <#else> <#if resultCounts.hasManualTests() > <#if (resultCounts.getOverallTestCount("ignored") != 0)> <#else> <#if resultCounts.hasManualTests() > <#if (resultCounts.getOverallTestCount("skipped") != 0)> <#else> <#if resultCounts.hasManualTests() > <#if resultCounts.hasManualTests() > <#if (resultCounts.getOverallTestsCount("failure","error","compromised") != 0)> <#else> <#else> <#if (resultCounts.getOverallTestsCount("failure","error","compromised") != 0)> <#else> <#if (resultCounts.getOverallTestCount("failure") != 0)> <#else> <#if resultCounts.hasManualTests() > <#if (resultCounts.getOverallTestCount("error") != 0)> <#else> <#if resultCounts.hasManualTests() > <#if (resultCounts.getOverallTestCount("compromised") != 0)> <#else> <#if resultCounts.hasManualTests() > <#if resultCounts.hasManualTests() >
<#-- Results table: one row per result type with automated / manual / overall counts
     and their percentage labels, followed by a totals row. -->
Scenario Results (including rows of test data) Automated Manual Total
 Passing  Passing ${resultCounts.getAutomatedTestCount("success")} ${resultCounts.getAutomatedTestPercentageLabel("success")}${resultCounts.getManualTestCount("success")} ${resultCounts.getManualTestPercentageLabel("success")} ${resultCounts.getOverallTestCount("success")} ${resultCounts.getOverallTestPercentageLabel("success")}
 Pending  Pending ${resultCounts.getAutomatedTestCount("pending")} ${resultCounts.getAutomatedTestPercentageLabel("pending")}${resultCounts.getManualTestCount("pending")} ${resultCounts.getManualTestPercentageLabel("pending")} ${resultCounts.getOverallTestCount("pending")} ${resultCounts.getOverallTestPercentageLabel("pending")}
 Ignored  Ignored ${resultCounts.getAutomatedTestCount("ignored")} ${resultCounts.getAutomatedTestPercentageLabel("ignored")}${resultCounts.getManualTestCount("ignored")} ${resultCounts.getManualTestPercentageLabel("ignored")} ${resultCounts.getOverallTestCount("ignored")} ${resultCounts.getOverallTestPercentageLabel("ignored")}
 Skipped  Skipped ${resultCounts.getAutomatedTestCount("skipped")} ${resultCounts.getAutomatedTestPercentageLabel("skipped")}${resultCounts.getManualTestCount("skipped")} ${resultCounts.getManualTestPercentageLabel("skipped")} ${resultCounts.getOverallTestCount("skipped")} ${resultCounts.getOverallTestPercentageLabel("skipped")}
 Unsuccessful  Unsuccessful  Unsuccessful  Unsuccessful
 Failed  Failed ${resultCounts.getAutomatedTestCount("failure")} ${resultCounts.getAutomatedTestPercentageLabel("failure")}${resultCounts.getManualTestCount("failure")} ${resultCounts.getManualTestPercentageLabel("failure")} ${resultCounts.getOverallTestCount("failure")} ${resultCounts.getOverallTestPercentageLabel("failure")}
 Broken  Broken ${resultCounts.getAutomatedTestCount("error")} ${resultCounts.getAutomatedTestPercentageLabel("error")}${resultCounts.getManualTestCount("error")} ${resultCounts.getManualTestPercentageLabel("error")} ${resultCounts.getOverallTestCount("error")} ${resultCounts.getOverallTestPercentageLabel("error")}
 Compromised  Compromised ${resultCounts.getAutomatedTestCount("compromised")} ${resultCounts.getAutomatedTestPercentageLabel("compromised")}${resultCounts.getManualTestCount("compromised")} ${resultCounts.getManualTestPercentageLabel("compromised")} ${resultCounts.getOverallTestCount("compromised")} ${resultCounts.getOverallTestPercentageLabel("compromised")}
Total ${resultCounts.getTotalAutomatedTestCount()} ${resultCounts.getTotalManualTestCount()} ${resultCounts.getTotalOverallTestCount()}
<#-- Timing summary for the whole run (values are pre-formatted strings from the model). -->
Execution Time Clock Time Fastest Test Slowest Test Average Execution Time
${totalTestDuration} ${totalClockDuration} ${minTestDuration} ${maxTestDuration} ${averageTestDuration}
<#-- Functional Coverage Overview: one table per tag type, listing per-tag scenario
     counts, pass rates, result icons, and coverage segments. Rendered only when the
     coverage model is non-empty. -->
<#if coverage?has_content>

Functional Coverage Overview

<#-- Use the paginated table style when a tag type has more than 10 coverage rows. -->
<#list coverage as tagCoverageByType> <#if tagCoverageByType.tagCoverage?has_content> <#if tagCoverageByType.tagCoverage?size <= 10> <#assign coverageTableClass="feature-coverage-table"> <#else> <#assign coverageTableClass="feature-coverage-table-with-pagination"> <#assign sectionTitle = inflection.of(tagCoverageByType.tagType).inPluralForm().asATitle() >

${sectionTitle}

<#list tagCoverageEntries as tagCoverage> below iterates the per-tag entries -->
<#assign tagCoverageEntries = tagCoverageByType.tagCoverage /> <#list tagCoverageEntries as tagCoverage>
${formatter.humanReadableFormOf(tagCoverageByType.tagType)} Scenarios % Pass Result Coverage
<#-- One row per tag; note "=" is FreeMarker's equality operator. Tags with no tests
     render the name only (no count, rate, or result icon). -->
<#if tagCoverage.testCount = 0> ${tagCoverage.tagName} <#else> ${tagCoverage.tagName} ${tagCoverage.testCount} ${tagCoverage.successRate} <#if tagCoverage.testCount = 0> <#else> ${tagCoverage.resultIcon}
<#list tagCoverage.coverageSegments as coverageSegment>
<#-- Test Failure Overview: shown only when there are failing/broken/compromised tests
     (badTestCount is assigned further up in this template). -->
<#if badTestCount != 0>

Test Failure Overview

Most Frequent Failures

<#-- Each frequent failure: result icon, failure name, and occurrence count. -->
<#list frequentFailures as frequentFailure>
${frequentFailure.resultIcon} ${frequentFailure.name} ${frequentFailure.count}

Most Unstable Features

<#-- Each unstable feature: name and its failure percentage. -->
<#list unstableFeatures as unstableFeature>
${unstableFeature.name} ${unstableFeature.failurePercentage}%
<#-- Tags section: one group per tag type (groups with an empty tag type get no heading). -->
<#if tagResults?has_content >

Tags

<#list tagResults as tagResultGroup >
<#if tagResultGroup.tagType?has_content>
${inflection.of(tagResultGroup.tagType).asATitle()}

Automated Tests

<#-- Automated test table: one row per scenario with parent name, title, optional example
     count, step count, start time, duration, and result icon + result. Falls back to a
     "no automated tests" message when the list is empty. -->
<#if (automatedTestCases?has_content)> <#list automatedTestCases as scenario> <#assign outcome_icon = formatter.resultIcon().forResult(scenario.result) />
${leafRequirementType} Scenario Steps Started Duration Result
<#if scenario.parentName?has_content> ${scenario.parentName} ${scenario.title} <#if scenario.hasExamples() > (${scenario.numberOfExamples}) ${scenario.stepCount} ${scenario.formattedStartTime} ${scenario.formattedDuration} ${outcome_icon} ${scenario.result}
<#else> No automated tests were executed

Manual Tests

<#-- Manual test table: same shape as the automated table but without timing columns. -->
<#if (manualTestCases?has_content)> <#list manualTestCases as scenario> <#assign outcome_icon = formatter.resultIcon().forResult(scenario.result) />
${leafRequirementType} Scenario Steps Result
<#if scenario.parentName?has_content> ${scenario.parentName} ${scenario.title} <#if scenario.hasExamples() > (${scenario.numberOfExamples}) ${scenario.stepCount} ${outcome_icon} ${scenario.result}
<#else> No manual tests were recorded
<#-- Evidence section: one row per evidence record (scenario, title, details link),
     shown only when evidence was attached to the run. -->
<#if evidence?has_content>

Evidence

<#list evidence as evidenceRecord>
Scenario Title Details
${evidenceRecord.scenario} ${evidenceRecord.title} ${evidenceRecord.detailsLink}
<#-- Footer: Serenity version, defaulting to "SNAPSHOT-BUILD" when not provided. -->
Serenity BDD version ${serenityVersionNumber!"SNAPSHOT-BUILD"}




© 2015 - 2024 Weber Informatics LLC | Privacy Policy