<!-- 158 lines, 13 KiB, HTML -->
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
|
|
|
|
<html lang="en">
|
|
|
|
<head>
|
|
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
|
|
<title>LCOV - coverage.info - libtorch/include/ATen/ops/max.h</title>
|
|
<link rel="stylesheet" type="text/css" href="../../../../gcov.css">
|
|
</head>
|
|
|
|
<body>
|
|
|
|
<table width="100%" border=0 cellspacing=0 cellpadding=0>
|
|
<tr><td class="title">LCOV - code coverage report</td></tr>
|
|
<tr><td class="ruler"><img src="../../../../glass.png" width=3 height=3 alt=""></td></tr>
|
|
|
|
<tr>
|
|
<td width="100%">
|
|
<table cellpadding=1 border=0 width="100%">
|
|
<tr>
|
|
<td width="10%" class="headerItem">Current view:</td>
|
|
<td width="10%" class="headerValue"><a href="../../../../index.html">top level</a> - <a href="index.html">libtorch/include/ATen/ops</a> - max.h<span style="font-size: 80%;"> (source / <a href="max.h.func-c.html">functions</a>)</span></td>
|
|
<td width="5%"></td>
|
|
<td width="5%"></td>
|
|
<td width="5%" class="headerCovTableHead">Coverage</td>
|
|
<td width="5%" class="headerCovTableHead" title="Covered + Uncovered code">Total</td>
|
|
<td width="5%" class="headerCovTableHead" title="Exercised code only">Hit</td>
|
|
</tr>
|
|
<tr>
|
|
<td class="headerItem">Test:</td>
|
|
<td class="headerValue">coverage.info</td>
|
|
<td></td>
|
|
<td class="headerItem">Lines:</td>
|
|
<td class="headerCovTableEntryHi">100.0 %</td>
|
|
<td class="headerCovTableEntry">2</td>
|
|
<td class="headerCovTableEntry">2</td>
|
|
</tr>
|
|
<tr>
|
|
<td class="headerItem">Test Date:</td>
|
|
<td class="headerValue">2024-04-30 13:17:26</td>
|
|
<td></td>
|
|
<td class="headerItem">Functions:</td>
|
|
<td class="headerCovTableEntryHi">100.0 %</td>
|
|
<td class="headerCovTableEntry">1</td>
|
|
<td class="headerCovTableEntry">1</td>
|
|
</tr>
|
|
<tr><td><img src="../../../../glass.png" width=3 height=3 alt=""></td></tr>
|
|
</table>
|
|
</td>
|
|
</tr>
|
|
|
|
<tr><td class="ruler"><img src="../../../../glass.png" width=3 height=3 alt=""></td></tr>
|
|
</table>
|
|
|
|
<table cellpadding=0 cellspacing=0 border=0>
|
|
<tr>
|
|
<td><br></td>
|
|
</tr>
|
|
<tr>
|
|
<td>
|
|
<pre class="sourceHeading"> Line data Source code</pre>
|
|
<pre class="source">
|
|
<span id="L1"><span class="lineNum"> 1</span> : #pragma once</span>
|
|
<span id="L2"><span class="lineNum"> 2</span> : </span>
|
|
<span id="L3"><span class="lineNum"> 3</span> : // @generated by torchgen/gen.py from Function.h</span>
|
|
<span id="L4"><span class="lineNum"> 4</span> : </span>
|
|
<span id="L5"><span class="lineNum"> 5</span> : #include <ATen/Context.h></span>
|
|
<span id="L6"><span class="lineNum"> 6</span> : #include <ATen/DeviceGuard.h></span>
|
|
<span id="L7"><span class="lineNum"> 7</span> : #include <ATen/TensorUtils.h></span>
|
|
<span id="L8"><span class="lineNum"> 8</span> : #include <ATen/TracerMode.h></span>
|
|
<span id="L9"><span class="lineNum"> 9</span> : #include <ATen/core/Generator.h></span>
|
|
<span id="L10"><span class="lineNum"> 10</span> : #include <ATen/core/Reduction.h></span>
|
|
<span id="L11"><span class="lineNum"> 11</span> : #include <ATen/core/Tensor.h></span>
|
|
<span id="L12"><span class="lineNum"> 12</span> : #include <c10/core/Scalar.h></span>
|
|
<span id="L13"><span class="lineNum"> 13</span> : #include <c10/core/Storage.h></span>
|
|
<span id="L14"><span class="lineNum"> 14</span> : #include <c10/core/TensorOptions.h></span>
|
|
<span id="L15"><span class="lineNum"> 15</span> : #include <c10/util/Deprecated.h></span>
|
|
<span id="L16"><span class="lineNum"> 16</span> : #include <c10/util/Optional.h></span>
|
|
<span id="L17"><span class="lineNum"> 17</span> : </span>
|
|
<span id="L18"><span class="lineNum"> 18</span> : </span>
|
|
<span id="L19"><span class="lineNum"> 19</span> : </span>
|
|
<span id="L20"><span class="lineNum"> 20</span> : #include <ATen/ops/max_ops.h></span>
|
|
<span id="L21"><span class="lineNum"> 21</span> : </span>
|
|
<span id="L22"><span class="lineNum"> 22</span> : namespace at {</span>
|
|
<span id="L23"><span class="lineNum"> 23</span> : </span>
|
|
<span id="L24"><span class="lineNum"> 24</span> : </span>
|
|
<span id="L25"><span class="lineNum"> 25</span> : // aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)</span>
|
|
<span id="L26"><span class="lineNum"> 26</span> : inline ::std::tuple<at::Tensor,at::Tensor> max(const at::Tensor & self, int64_t dim, bool keepdim=false) {</span>
|
|
<span id="L27"><span class="lineNum"> 27</span> : return at::_ops::max_dim::call(self, dim, keepdim);</span>
|
|
<span id="L28"><span class="lineNum"> 28</span> : }</span>
|
|
<span id="L29"><span class="lineNum"> 29</span> : </span>
|
|
<span id="L30"><span class="lineNum"> 30</span> : // aten::max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)</span>
|
|
<span id="L31"><span class="lineNum"> 31</span> : inline ::std::tuple<at::Tensor &,at::Tensor &> max_out(at::Tensor & max, at::Tensor & max_values, const at::Tensor & self, int64_t dim, bool keepdim=false) {</span>
|
|
<span id="L32"><span class="lineNum"> 32</span> : return at::_ops::max_dim_max::call(self, dim, keepdim, max, max_values);</span>
|
|
<span id="L33"><span class="lineNum"> 33</span> : }</span>
|
|
<span id="L34"><span class="lineNum"> 34</span> : // aten::max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)</span>
|
|
<span id="L35"><span class="lineNum"> 35</span> : inline ::std::tuple<at::Tensor &,at::Tensor &> max_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & max, at::Tensor & max_values) {</span>
|
|
<span id="L36"><span class="lineNum"> 36</span> : return at::_ops::max_dim_max::call(self, dim, keepdim, max, max_values);</span>
|
|
<span id="L37"><span class="lineNum"> 37</span> : }</span>
|
|
<span id="L38"><span class="lineNum"> 38</span> : </span>
|
|
<span id="L39"><span class="lineNum"> 39</span> : // aten::max.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)</span>
|
|
<span id="L40"><span class="lineNum"> 40</span> : inline ::std::tuple<at::Tensor,at::Tensor> max(const at::Tensor & self, at::Dimname dim, bool keepdim=false) {</span>
|
|
<span id="L41"><span class="lineNum"> 41</span> : return at::_ops::max_names_dim::call(self, dim, keepdim);</span>
|
|
<span id="L42"><span class="lineNum"> 42</span> : }</span>
|
|
<span id="L43"><span class="lineNum"> 43</span> : </span>
|
|
<span id="L44"><span class="lineNum"> 44</span> : // aten::max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)</span>
|
|
<span id="L45"><span class="lineNum"> 45</span> : inline ::std::tuple<at::Tensor &,at::Tensor &> max_out(at::Tensor & max, at::Tensor & max_values, const at::Tensor & self, at::Dimname dim, bool keepdim=false) {</span>
|
|
<span id="L46"><span class="lineNum"> 46</span> : return at::_ops::max_names_dim_max::call(self, dim, keepdim, max, max_values);</span>
|
|
<span id="L47"><span class="lineNum"> 47</span> : }</span>
|
|
<span id="L48"><span class="lineNum"> 48</span> : // aten::max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)</span>
|
|
<span id="L49"><span class="lineNum"> 49</span> : inline ::std::tuple<at::Tensor &,at::Tensor &> max_outf(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & max, at::Tensor & max_values) {</span>
|
|
<span id="L50"><span class="lineNum"> 50</span> : return at::_ops::max_names_dim_max::call(self, dim, keepdim, max, max_values);</span>
|
|
<span id="L51"><span class="lineNum"> 51</span> : }</span>
|
|
<span id="L52"><span class="lineNum"> 52</span> : </span>
|
|
<span id="L53"><span class="lineNum"> 53</span> : // aten::max(Tensor self) -> Tensor</span>
|
|
<span id="L54"><span class="lineNum"> 54</span> <span class="tlaGNC tlaBgGNC"> 116 : inline at::Tensor max(const at::Tensor & self) {</span></span>
|
|
<span id="L55"><span class="lineNum"> 55</span> <span class="tlaGNC"> 116 : return at::_ops::max::call(self);</span></span>
|
|
<span id="L56"><span class="lineNum"> 56</span> : }</span>
|
|
<span id="L57"><span class="lineNum"> 57</span> : </span>
|
|
<span id="L58"><span class="lineNum"> 58</span> : // aten::max.other(Tensor self, Tensor other) -> Tensor</span>
|
|
<span id="L59"><span class="lineNum"> 59</span> : inline at::Tensor max(const at::Tensor & self, const at::Tensor & other) {</span>
|
|
<span id="L60"><span class="lineNum"> 60</span> : return at::_ops::max_other::call(self, other);</span>
|
|
<span id="L61"><span class="lineNum"> 61</span> : }</span>
|
|
<span id="L62"><span class="lineNum"> 62</span> : </span>
|
|
<span id="L63"><span class="lineNum"> 63</span> : // aten::max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)</span>
|
|
<span id="L64"><span class="lineNum"> 64</span> : inline at::Tensor & max_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {</span>
|
|
<span id="L65"><span class="lineNum"> 65</span> : return at::_ops::max_out::call(self, other, out);</span>
|
|
<span id="L66"><span class="lineNum"> 66</span> : }</span>
|
|
<span id="L67"><span class="lineNum"> 67</span> : // aten::max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)</span>
|
|
<span id="L68"><span class="lineNum"> 68</span> : inline at::Tensor & max_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {</span>
|
|
<span id="L69"><span class="lineNum"> 69</span> : return at::_ops::max_out::call(self, other, out);</span>
|
|
<span id="L70"><span class="lineNum"> 70</span> : }</span>
|
|
<span id="L71"><span class="lineNum"> 71</span> : </span>
|
|
<span id="L72"><span class="lineNum"> 72</span> : // aten::max.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)</span>
|
|
<span id="L73"><span class="lineNum"> 73</span> : inline at::Tensor & max_out(at::Tensor & out, const at::Tensor & self) {</span>
|
|
<span id="L74"><span class="lineNum"> 74</span> : return at::_ops::max_unary_out::call(self, out);</span>
|
|
<span id="L75"><span class="lineNum"> 75</span> : }</span>
|
|
<span id="L76"><span class="lineNum"> 76</span> : // aten::max.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)</span>
|
|
<span id="L77"><span class="lineNum"> 77</span> : inline at::Tensor & max_outf(const at::Tensor & self, at::Tensor & out) {</span>
|
|
<span id="L78"><span class="lineNum"> 78</span> : return at::_ops::max_unary_out::call(self, out);</span>
|
|
<span id="L79"><span class="lineNum"> 79</span> : }</span>
|
|
<span id="L80"><span class="lineNum"> 80</span> : </span>
|
|
<span id="L81"><span class="lineNum"> 81</span> : }</span>
|
|
</pre>
|
|
</td>
|
|
</tr>
|
|
</table>
|
|
<br>
|
|
|
|
<table width="100%" border=0 cellspacing=0 cellpadding=0>
|
|
<tr><td class="ruler"><img src="../../../../glass.png" width=3 height=3 alt=""></td></tr>
|
|
<tr><td class="versionInfo">Generated by: <a href="https://github.com/linux-test-project/lcov" target="_parent">LCOV version 2.0-1</a></td></tr>
|
|
</table>
|
|
<br>
|
|
|
|
</body>
|
|
</html>
|