add paper
- .gitattributes +1 -0
- SVTRv2: CTC Beats Encoder-Decoder Models in Scene Text Recognition/00README.json +21 -0
- SVTRv2: CTC Beats Encoder-Decoder Models in Scene Text Recognition/SVTRv2_case.pdf +3 -0
- SVTRv2: CTC Beats Encoder-Decoder Models in Scene Text Recognition/SVTRv2_fig1.pdf +3 -0
- SVTRv2: CTC Beats Encoder-Decoder Models in Scene Text Recognition/SVTRv2_overview.pdf +3 -0
- SVTRv2: CTC Beats Encoder-Decoder Models in Scene Text Recognition/SVTRv2_smtr.pdf +3 -0
- SVTRv2: CTC Beats Encoder-Decoder Models in Scene Text Recognition/SVTRv2case_long.pdf +3 -0
- SVTRv2: CTC Beats Encoder-Decoder Models in Scene Text Recognition/SVTRv2:CTC Beats Encoder-Decoder Models in Scene Text Recognition.pdf +3 -0
- SVTRv2: CTC Beats Encoder-Decoder Models in Scene Text Recognition/X_suppl.tex +364 -0
- SVTRv2: CTC Beats Encoder-Decoder Models in Scene Text Recognition/data_wenshi.pdf +3 -0
- SVTRv2: CTC Beats Encoder-Decoder Models in Scene Text Recognition/iccv.sty +508 -0
- SVTRv2: CTC Beats Encoder-Decoder Models in Scene Text Recognition/ieeenat_fullname.bst +1448 -0
- SVTRv2: CTC Beats Encoder-Decoder Models in Scene Text Recognition/main.bbl +343 -0
- SVTRv2: CTC Beats Encoder-Decoder Models in Scene Text Recognition/main.bib +1225 -0
- SVTRv2: CTC Beats Encoder-Decoder Models in Scene Text Recognition/main.tex +571 -0
- SVTRv2: CTC Beats Encoder-Decoder Models in Scene Text Recognition/preamble.tex +10 -0
- SVTRv2: CTC Beats Encoder-Decoder Models in Scene Text Recognition/readme.txt +4 -0
- SVTRv2: CTC Beats Encoder-Decoder Models in Scene Text Recognition/svtrv2_ic15_6_h_fig.pdf +3 -0
- SVTRv2: CTC Beats Encoder-Decoder Models in Scene Text Recognition/svtrv2_noic15_6_h_fig1.pdf +3 -0
.gitattributes
CHANGED
@@ -1,4 +1,5 @@
 *.7z filter=lfs diff=lfs merge=lfs -text
+*.pdf filter=lfs diff=lfs merge=lfs -text
 *.onnx filter=lfs diff=lfs merge=lfs -text
 *.arrow filter=lfs diff=lfs merge=lfs -text
 *.bin filter=lfs diff=lfs merge=lfs -text
SVTRv2: CTC Beats Encoder-Decoder Models in Scene Text Recognition/00README.json
ADDED
@@ -0,0 +1,21 @@
{
  "sources" : [
    {
      "usage" : "toplevel",
      "filename" : "main.tex"
    },
    {
      "usage" : "ignore",
      "filename" : "ieeenat_fullname.bst"
    },
    {
      "usage" : "ignore",
      "filename" : "main.bib"
    }
  ],
  "version" : 2,
  "texlive_version" : 2023,
  "process" : {
    "compiler" : "pdflatex"
  }
}
SVTRv2: CTC Beats Encoder-Decoder Models in Scene Text Recognition/SVTRv2_case.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:93c0b872b4bb2758ea04b6b5dc9518b819bb04f662a897cdc10807d16a8d9dee
size 421786
SVTRv2: CTC Beats Encoder-Decoder Models in Scene Text Recognition/SVTRv2_fig1.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3145ba80af8fd44fe3778d747c3e2d5fe6f83438696aa988829579abfacdc67f
size 108659
SVTRv2: CTC Beats Encoder-Decoder Models in Scene Text Recognition/SVTRv2_overview.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2d1966ac8ca429780e4c9526316eb79834a62a4aec23fb3daa02ba21c344c110
size 478311
SVTRv2: CTC Beats Encoder-Decoder Models in Scene Text Recognition/SVTRv2_smtr.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:37c1d51429196d6951db63d915392cbc1423fe230d7decf3d078eedc0fe389ff
size 51947
SVTRv2: CTC Beats Encoder-Decoder Models in Scene Text Recognition/SVTRv2case_long.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:15f653c7368b2be9819d2a29f80e697656cdc337abb57f2e10e5ef4acd687590
size 99332
SVTRv2: CTC Beats Encoder-Decoder Models in Scene Text Recognition/SVTRv2:CTC Beats Encoder-Decoder Models in Scene Text Recognition.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4b32253f598d0a52806c2e9b067218275ddc8472dc88bd2b8d86c7b62c4eb16f
size 9382608
SVTRv2: CTC Beats Encoder-Decoder Models in Scene Text Recognition/X_suppl.tex
ADDED
@@ -0,0 +1,364 @@
\clearpage
\setcounter{page}{1}
% \setcounter{section}{0}
\maketitlesupplementary


\begin{table*}[t]\footnotesize
\centering
\setlength{\tabcolsep}{3.5 pt}{
\begin{tabular}{c|c|ccccccc|cccccccc|c|c|c|c}
\multicolumn{21}{c}{\setlength{\tabcolsep}{3pt}{\begin{tabular}{
>{\columncolor[HTML]{FFFFC7}}c
>{\columncolor[HTML]{FFFFC7}}c
>{\columncolor[HTML]{FFFFC7}}c
>{\columncolor[HTML]{FFFFC7}}c
>{\columncolor[HTML]{FFFFC7}}c
>{\columncolor[HTML]{FFFFC7}}c c
>{\columncolor[HTML]{ECF4FF}}c
>{\columncolor[HTML]{ECF4FF}}c
>{\columncolor[HTML]{ECF4FF}}c
>{\columncolor[HTML]{ECF4FF}}c
>{\columncolor[HTML]{ECF4FF}}c
>{\columncolor[HTML]{ECF4FF}}c
>{\columncolor[HTML]{ECF4FF}}c }
\toprule
\textit{IIIT5k} & \textit{SVT} & \textit{ICDAR2013} & \textit{ICDAR2015} & \textit{SVTP} & \textit{CUTE80} & $\|$ & \textit{Curve} & \textit{Multi-Oriented} & \textit{Artistic} & \textit{Contextless} & \textit{Salient} & \textit{Multi-Words} & \textit{General}
\end{tabular}}} \\
\toprule
ID & Method & \multicolumn{6}{c}{\cellcolor[HTML]{FFFFC7}Common Benchmarks (\textit{Com})} & Avg & \multicolumn{7}{c}{\cellcolor[HTML]{ECF4FF}Union14M-Benchmark (\textit{U14M})} & Avg & \textit{LTB} & \textit{OST} & \textit{Size} & \textit{FPS} \\
\midrule
0 & SVTR (w/ TPS) & 98.1 & 96.1 & 96.4 & 89.2 & 92.1 & 95.8 & 94.62 & 82.2 & 86.1 & 69.7 & 75.1 & 81.6 & 73.8 & 80.7 & 78.44 & 0.0 & 71.2 & 19.95 & 141 \\
1 & 0 + w/o TPS & 98.0 & 97.1 & 97.3 & 88.6 & 90.7 & 95.8 & 94.58 & 76.2 & 44.5 & 67.8 & 78.7 & 75.2 & 77.9 & 77.8 & 71.17 & 45.1 & 67.8 & 18.10 & 161 \\
\midrule
2 & 1 + $\frac{H}{16}\rightarrow\frac{H}{8}$ & 98.9 & 97.4 & 97.9 & 89.7 & 91.8 & 96.9 & 95.41 & 82.2 & 64.3 & 70.2 & 80.0 & 80.9 & 80.6 & 80.5 & 76.95 & 44.8 & 69.5 & 18.10 & 145 \\
3 & 2 + Conv$^2$ & 98.7 & 97.1 & 97.1 & 89.6 & 91.6 & 97.6 & 95.28 & 82.9 & 65.6 & 73.2 & 80.0 & 80.5 & 81.6 & 80.8 & 77.78 & 47.4 & 71.1 & 17.77 & 159 \\
4 & 3 + MSR & 98.7 & 98.0 & 97.4 & 89.4 & 91.6 & 97.6 & 95.44 & 87.4 & 83.7 & 75.4 & 80.9 & 81.9 & 83.5 & 82.8 & 82.22 & 50.9 & 72.5 & 17.77 & 159 \\
5 & 4 + FRM & 98.8 & \textbf{98.1} & 98.4 & 89.8 & 92.9 & \textbf{99.0} & 96.16 & 88.2 & 86.2 & 77.5 & 83.2 & 83.9 & 84.6 & 83.5 & 83.86 & 50.7 & 74.9 & 19.76 & 143 \\
6 & 5 + SGM & \textbf{99.2} & 98.0 & \textbf{98.7} & \textbf{91.1} & \textbf{93.5} & \textbf{99.0} & \textbf{96.57} & \textbf{90.6} & \textbf{89.0} & \textbf{79.3} & \textbf{86.1} & \textbf{86.2} & \textbf{86.7} & \textbf{85.1} & \textbf{86.14} & \textbf{50.2} & \textbf{80.0} & 19.76 & 143 \\
\bottomrule
\end{tabular}}
\caption{Ablation study of the proposed strategies on \textit{Com} and \textit{U14M}, along with their model sizes and FPS.}
\label{tab:add_ablation}
\end{table*}


\section{More Details of Ablation Study}

SVTRv2 builds upon the foundation of SVTR by introducing several innovative strategies aimed at addressing challenges in recognizing irregular text and modeling linguistic context. The key advancements and their impact are detailed as follows:

\textbf{Removal of the rectification module and introduction of MSR and FRM}. In the original SVTR, a rectification module is employed to recognize irregular text. However, this approach negatively impacts the recognition of long text. To overcome this limitation, SVTRv2 removes the rectification module entirely. To effectively handle irregular text without compromising the CTC model's ability to generalize to long text, MSR and FRM are introduced.

\textbf{Improvement in feature resolution}. SVTR extracts visual representations of size \(\frac{H}{16} \times \frac{W}{4} \times D_2\) from input images of size \(H \times W \times 3\). While this approach is effective for regular text, it struggles with retaining the distinct characteristics of irregular text. SVTRv2 doubles the height resolution ($\frac{H}{16}\rightarrow\frac{H}{8}$) of visual features, producing features of size \(\frac{H}{8} \times \frac{W}{4} \times D_2\), thereby improving its capacity to recognize irregular text.

\textbf{Refinement of local mixing mechanisms}. SVTR employs a hierarchical vision transformer structure, leveraging two mixing strategies: Local Mixing is implemented through a sliding window-based local attention mechanism, and Global Mixing employs the standard global multi-head self-attention mechanism. SVTRv2 retains the hierarchical vision transformer structure and the global multi-head self-attention mechanism for Global Mixing. For Local Mixing, SVTRv2 introduces a pivotal change. Specifically, the sliding window-based local attention is replaced with two consecutive group convolutions (Conv$^2$) \cite{he2016resnet}. It is important to highlight that, unlike previous CNNs, there is no normalization or activation layer between the two convolutions.
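A minimal PyTorch sketch of this Conv$^2$ local mixing, i.e., two back-to-back group convolutions with nothing between them; the class name, kernel size, and group count are illustrative assumptions, not the exact configuration used in the paper:

import torch
import torch.nn as nn

class LocalMixingConv2(nn.Module):
    """Two consecutive group convolutions, deliberately without any
    normalization or activation layer between them (the Conv^2 idea)."""
    def __init__(self, dim: int, kernel_size: int = 5, groups: int = 8):
        super().__init__()
        pad = kernel_size // 2
        self.conv1 = nn.Conv2d(dim, dim, kernel_size, padding=pad, groups=groups)
        self.conv2 = nn.Conv2d(dim, dim, kernel_size, padding=pad, groups=groups)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: (B, C, H/8, W/4) feature map from one encoder stage
        return self.conv2(self.conv1(x))

feat = torch.randn(2, 128, 4, 32)        # toy feature map
out = LocalMixingConv2(dim=128)(feat)    # same spatial shape as the input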

\textbf{Semantic guidance module}. The original SVTR model relies solely on the CTC framework for both training and inference. However, CTC is inherently limited in its ability to model linguistic context. SVTRv2 addresses this by introducing a Semantic Guidance Module (SGM) during training. SGM facilitates the visual encoder in capturing linguistic information, enriching the feature representation. Importantly, SGM is discarded during inference, ensuring that the efficiency of CTC-based decoding remains unaffected while still benefiting from its contributions during the training phase.

\subsection{Progressive Ablation Experiments}

To comprehensively evaluate the contribution of each SVTRv2 upgrade, we conduct a series of progressive ablation experiments. Tab.~\ref{tab:add_ablation} reports the results, from which we make the following observations:

1. Baseline (ID~0): The original SVTR serves as the baseline for comparison.

2. Rectification Module Removal (ID~1): Removing the rectification module (e.g., TPS) reveals that while it improves irregular-text recognition accuracy, it hinders the model's ability to recognize long text. This confirms its limitations in balancing different recognition tasks.

3. Improvement in Feature Resolution (ID~2): Doubling the height resolution (\(\frac{H}{16} \rightarrow \frac{H}{8}\)) significantly boosts performance across challenging datasets, particularly for irregular text.

4. Replacement of Local Attention with Conv$^2$ (ID~3): Replacing the sliding window-based local attention with two consecutive group convolutions (Conv$^2$) yields improvements on artistic text, with a 3.0\% increase in accuracy. This result highlights the efficacy of convolution-based approaches in capturing character-level nuances, such as strokes and textures, thereby improving the model's ability to recognize artistic and irregular text.

5. Incorporation of MSR and FRM (ID~4 and ID~5): These components collectively enhance accuracy on irregular text benchmarks (e.g., \textit{Curve}), surpassing the rectification-based SVTR (ID~0) by 6.0\%, without compromising the CTC model's ability to generalize to long text.

6. Integration of SGM (ID~6): Adding SGM yields significant gains on multiple datasets, improving accuracy on \textit{OST} by 5.11\% and \textit{U14M} by 2.28\%.

In summary, by integrating Conv$^2$, MSR, FRM, and SGM, SVTRv2 significantly improves performance in recognizing irregular text and modeling linguistic context over SVTR, while still maintaining robust long-text recognition capabilities and preserving the efficiency of CTC-based inference.


\section{SVTRv2 Variants}
There are several hyper-parameters in SVTRv2, including the channel depth ($D_i$) and the number of heads at each stage, as well as the number of mixing blocks ($N_i$) and their permutation. By varying them, SVTRv2 architectures with different capacities can be obtained, and we construct three typical ones, i.e., SVTRv2-T (Tiny), SVTRv2-S (Small), and SVTRv2-B (Base). Their detailed configurations are shown in Tab.~\ref{tab:booktab1}.

In Tab.~\ref{tab:booktab1}, $[L]_m[G]_n$ denotes that the first \textit{m} mixing blocks in SVTRv2 utilize local mixing, while the last \textit{n} mixing blocks employ global mixing. Specifically, in SVTRv2-T and SVTRv2-S, all blocks in the first stage and the first three blocks in the second stage use local mixing. The last three blocks in the second stage, as well as all blocks in the third stage, use global mixing. In the case of SVTRv2-B, all blocks in the first stage and the first two blocks in the second stage use local mixing, whereas the last four blocks in the second stage and all blocks in the third stage adopt global mixing.
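To make the $[L]_m[G]_n$ notation concrete, the three variants described above can be expanded into an explicit per-block mixing schedule; the Python snippet below is a hypothetical illustration derived from the variant table and the paragraph above, not code from the repository:

# L = local mixing block, G = global mixing block
SVTRV2_VARIANTS = {
    # name: (channel dims, blocks per stage, heads, mixing per block)
    "SVTRv2-T": ([64, 128, 256],  [3, 6, 3], [2, 4, 8],  ["L"] * 6 + ["G"] * 6),
    "SVTRv2-S": ([96, 192, 384],  [3, 6, 3], [3, 6, 12], ["L"] * 6 + ["G"] * 6),
    "SVTRv2-B": ([128, 256, 384], [6, 6, 6], [4, 8, 12], ["L"] * 8 + ["G"] * 10),
}

# e.g. SVTRv2-B: all 6 blocks of stage 1 and the first 2 blocks of stage 2
# are local; the last 4 blocks of stage 2 and all 6 blocks of stage 3 are global.
assert len(SVTRV2_VARIANTS["SVTRv2-B"][3]) == sum(SVTRV2_VARIANTS["SVTRv2-B"][1])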

\begin{table}[t]\footnotesize
\centering
\setlength{\tabcolsep}{4pt}{
\begin{tabular}{c|c|c|c|c}
\toprule
Models & $\left[D_0, D_1, D_2 \right]$ & $[N_1, N_2, N_3]$ & Heads & Permutation \\
\midrule
SVTRv2-T & {[}64,128,256{]} & {[}3,6,3{]} & {[}2,4,8{]} & $[L]_6[G]_6$ \\
SVTRv2-S & {[}96,192,384{]} & {[}3,6,3{]} & {[}3,6,12{]} & $[L]_6[G]_6$ \\
SVTRv2-B & {[}128,256,384{]} & {[}6,6,6{]} & {[}4,8,12{]} & $[L]_8[G]_{10}$ \\
\bottomrule
\end{tabular}}
\caption{Architecture specifications of SVTRv2 variants.}
\label{tab:booktab1}
\end{table}

\begin{figure}[t]
\centering
\includegraphics[width=0.47\textwidth]{data_wenshi.pdf}
\caption{Relationships of the three real-world training sets and their overlapping with \textit{U14M}.}
\label{fig:data}
\end{figure}

\begin{table*}[t]\footnotesize
\centering
\setlength{\tabcolsep}{4pt}{
\begin{tabular}{c|ccccccc}
\toprule
& \textit{Curve} & \textit{Multi-Oriented} & \textit{Artistic} & \textit{Contextless} & \textit{Salient} & \textit{Multi-Words} & \textit{General} \\
& 2,426 & 1,369 & 900 & 779 & 1,585 & 829 & 400,000 \\
\midrule
\textit{Real} \cite{BautistaA22PARSeq} & 1,276 & 440 & 432 & 326 & 431 & 193 & 254,174 \\
\textit{REBU-Syn} \cite{Rang_2024_CVPR_clip4str} & 1,285 & 443 & 462 & 363 & 442 & 289 & 260,575 \\
\textit{U14M-Train} \cite{jiang2023revisiting} & 9 & 3 & 30 & 37 & 11 & 96 & 6,401 \\
\bottomrule
\end{tabular}}
\caption{Overlapping statistics between three real-world training sets and \textit{U14M}.}
\label{tab:data}
\end{table*}


\begin{algorithm}[t]
\caption{Inference Time}
\label{alg:inferencetime}
\SetKwInOut{Input}{Input}\SetKwInOut{Output}{Output}
\Input{A set of images $\mathcal{I}$ with size $|\mathcal{I}| = 3000$, batch size $B = 1$, $N$ text lengths}
\Output{Overall inference time of the model}
\BlankLine
Initialize two lists: \texttt{total\_time\_list} and \texttt{count\_list} of size $N$, initialized to 0\;
\For{each image $I_j$ in $\mathcal{I}$ where $j \in \{1, 2, \ldots, 3000\}$}{
Determine the text length $l_i$ for image $I_j$\;
Perform inference on $I_j$ with text length $l_i$\;
Record inference time $t_{ij}$\;
\texttt{total\_time\_list[$l_i$]} += $t_{ij}$\;
\texttt{count\_list[$l_i$]} += 1\;
}
\BlankLine
Initialize \texttt{avg\_time\_list}\;
\For{each text length $l_i$ where $i \in \{1, 2, \ldots, N\}$}{
\If{\texttt{count\_list[$i$]} $> 0$}{
\texttt{avg\_time\_list[$i$]} = \texttt{total\_time\_list[$i$]} / \texttt{count\_list[$i$]}\;
}
}
\BlankLine
Compute the final average inference time:
\[
\texttt{inference\_time} = \frac{1}{N} \sum_{i=1}^{N} \texttt{avg\_time\_list[$i$]}
\]
\Return \texttt{inference\_time}\;
\end{algorithm}


\section{More Details of Real-World Datasets}

For English recognition, we train models on real-world datasets, from which models exhibit stronger recognition capability \cite{BautistaA22PARSeq,jiang2023revisiting,Rang_2024_CVPR_clip4str}. There are three large-scale real-world training sets, i.e., the \textit{Real} dataset \cite{BautistaA22PARSeq}, \textit{REBU-Syn} \cite{Rang_2024_CVPR_clip4str}, and \textit{Union14M-L} (\textit{U14M-Train}) \cite{jiang2023revisiting}. However, as shown in Fig.~\ref{fig:data} and Tab.~\ref{tab:data}, the former two overlap significantly with \textit{U14M} and are thus not suitable for model training when \textit{U14M} is used as the evaluation dataset. Surprisingly, \textit{U14M-Train} also overlaps with \textit{U14M} on nearly 6.5k text instances across the seven subsets. This means that models trained on \textit{U14M-Train} suffer from data leakage when tested on \textit{U14M}, and thus the results reported by \cite{jiang2023revisiting} should be updated. To this end, we create a filtered version of \textit{Union14M-L}, termed \textit{U14M-Filter}, by removing these overlapping instances from the training set. This new dataset is used to train SVTRv2 and the other 24 methods we reproduced.

\section{More Details of Inference Time}

For inference time measurement, we do not utilize any acceleration framework and instead employ PyTorch's dynamic graph mode on one NVIDIA 1080Ti GPU. We first measure the inference time for 3,000 images with a batch size of \textit{1}, calculating the average inference time for each text length. We then compute the arithmetic mean of the average time across all text lengths to determine the overall inference time of the model. Algorithm~\ref{alg:inferencetime} details the process of measuring inference time.
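The protocol in the paragraph above (and in Algorithm 1) reduces to a few lines of Python; this is a sketch under the stated setup, with `model` and `samples` as hypothetical placeholders for the recognizer and the 3,000 (image, label) pairs:

import time
from collections import defaultdict

def measure_inference_time(model, samples):
    # samples: (image, label) pairs, processed one at a time (batch size 1)
    total_time = defaultdict(float)   # text length -> summed seconds
    count = defaultdict(int)          # text length -> number of images

    for image, label in samples:
        length = len(label)
        start = time.perf_counter()
        model(image)                  # single-image forward pass
        total_time[length] += time.perf_counter() - start
        count[length] += 1

    # Average within each text length, then take the arithmetic mean of the
    # per-length averages, matching Algorithm 1.
    per_length_avg = [total_time[l] / count[l] for l in count]
    return sum(per_length_avg) / len(per_length_avg)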

\section{Results when Trained on Synthetic Datasets}

Previous research typically follows an evaluation protocol where models are trained on synthetic datasets and validated on \textit{Com}, the six widely recognized real-world benchmarks. Following this protocol, we also train SVTRv2 and other models on synthetic datasets. In addition to evaluating SVTRv2 on \textit{Com}, we assess its performance on \textit{U14M}. The results offer a comprehensive evaluation of the model's generalization capabilities. For methods that have not reported performance on challenging benchmarks, we conduct additional evaluations using their publicly available models and present these results for comparative analysis. As illustrated in Tab.~\ref{tab:syn_sota}, models trained on synthetic datasets exhibit notably lower performance compared to those trained on large-scale real-world datasets (see Tab.~\ref{tab:sota}). This performance drop is particularly pronounced on challenging benchmarks. These findings highlight the critical importance of real-world datasets in improving recognition accuracy.

Despite being trained on less diverse synthetic datasets, SVTRv2 also exhibits competitive performance. On irregular text benchmarks, such as \textit{Curve} and \textit{Multi-Oriented}, SVTR achieves strong results, largely due to its integrated rectification module \cite{shi2019aster}, which is particularly adept at handling irregular text patterns, even when trained on synthetic datasets. Notably, SVTRv2 achieves a substantial 4.8\% improvement over SVTR on \textit{Curve}, further demonstrating its enhanced capacity to address irregular text. Overall, these results demonstrate that, even when trained solely on synthetic datasets, SVTRv2 exhibits strong generalization capabilities, effectively handling complex and challenging text recognition scenarios.

\begin{table*}[t]\footnotesize
\centering
\setlength{\tabcolsep}{3pt}{
\begin{tabular}{r|c|c|ccccccc|cccccccc|c}
\multicolumn{19}{c}{\setlength{\tabcolsep}{3pt}{\begin{tabular}{
>{\columncolor[HTML]{FFFFC7}}c
>{\columncolor[HTML]{FFFFC7}}c
>{\columncolor[HTML]{FFFFC7}}c
>{\columncolor[HTML]{FFFFC7}}c
>{\columncolor[HTML]{FFFFC7}}c
>{\columncolor[HTML]{FFFFC7}}c c
>{\columncolor[HTML]{ECF4FF}}c
>{\columncolor[HTML]{ECF4FF}}c
>{\columncolor[HTML]{ECF4FF}}c
>{\columncolor[HTML]{ECF4FF}}c
>{\columncolor[HTML]{ECF4FF}}c
>{\columncolor[HTML]{ECF4FF}}c
>{\columncolor[HTML]{ECF4FF}}c }
\toprule
\textit{IIIT5k} & \textit{SVT} & \textit{ICDAR2013} & \textit{ICDAR2015} & \textit{SVTP} & \textit{CUTE80} & $\|$ & \textit{Curve} & \textit{Multi-Oriented} & \textit{Artistic} & \textit{Contextless} & \textit{Salient} & \textit{Multi-Words} & \textit{General}
\end{tabular}}} \\
\toprule
Method & Venue & Encoder & \multicolumn{6}{c}{\cellcolor[HTML]{FFFFC7}Common Benchmarks (\textit{Com})} & Avg & \multicolumn{7}{c}{\cellcolor[HTML]{ECF4FF}Union14M-Benchmark (\textit{U14M})} & Avg & \textit{Size} \\
\midrule
ASTER \cite{shi2019aster} & TPAMI 2019 & ResNet+LSTM & 93.3 & 90.0 & 90.8 & 74.7 & 80.2 & 80.9 & 84.98 & 34.0 & 10.2 & 27.7 & 33.0 & 48.2 & 27.6 & 39.8 & 31.50 & 27.2 \\
NRTR \cite{Sheng2019nrtr} & ICDAR 2019 & Stem+TF$_6$ & 90.1 & 91.5 & 95.8 & 79.4 & 86.6 & 80.9 & 87.38 & 31.7 & 4.40 & 36.6 & 37.3 & 30.6 & 54.9 & 48.0 & 34.79 & 31.7 \\
MORAN \cite{pr2019MORAN} & PR 2019 & ResNet+LSTM & 91.0 & 83.9 & 91.3 & 68.4 & 73.3 & 75.7 & 80.60 & 8.90 & 0.70 & 29.4 & 20.7 & 17.9 & 23.8 & 35.2 & 19.51 & 17.4 \\
SAR \cite{li2019sar} & AAAI 2019 & ResNet+LSTM & 91.5 & 84.5 & 91.0 & 69.2 & 76.4 & 83.5 & 82.68 & 44.3 & 7.70 & 42.6 & 44.2 & 44.0 & 51.2 & 50.5 & 40.64 & 57.7 \\
DAN \cite{wang2020aaai_dan} & AAAI 2020 & ResNet+FPN & 93.4 & 87.5 & 92.1 & 71.6 & 78.0 & 81.3 & 83.98 & 26.7 & 1.50 & 35.0 & 40.3 & 36.5 & 42.2 & 42.1 & 32.04 & 27.7 \\
SRN \cite{yu2020srn} & CVPR 2020 & ResNet+FPN & 94.8 & 91.5 & 95.5 & 82.7 & 85.1 & 87.8 & 89.57 & 63.4 & 25.3 & 34.1 & 28.7 & 56.5 & 26.7 & 46.3 & 40.14 & 54.7 \\
SEED* \cite{cvpr2020seed} & CVPR 2020 & ResNet+LSTM & 93.8 & 89.6 & 92.8 & 80.0 & 81.4 & 83.6 & 86.87 & 40.4 & 15.5 & 32.1 & 32.5 & 54.8 & 35.6 & 39.0 & 35.70 & 24.0 \\
AutoSTR* \cite{zhang2020autostr} & ECCV 2020 & NAS+LSTM & 94.7 & 90.9 & 94.2 & 81.8 & 81.7 & - & - & 47.7 & 17.9 & 30.8 & 36.2 & 64.2 & 38.7 & 41.3 & 39.54 & 6.00 \\
RoScanner \cite{yue2020robustscanner} & ECCV 2020 & ResNet & 95.3 & 88.1 & 94.8 & 77.1 & 79.5 & 90.3 & 87.52 & 43.6 & 7.90 & 41.2 & 42.6 & 44.9 & 46.9 & 39.5 & 38.09 & 48.0 \\
ABINet \cite{fang2021abinet} & CVPR 2021 & ResNet+TF$_3$ & 96.2 & 93.5 & 97.4 & 86.0 & 89.3 & 89.2 & 91.93 & 59.5 & 12.7 & 43.3 & 38.3 & 62.0 & 50.8 & 55.6 & 46.03 & 36.7 \\
VisionLAN \cite{Wang_2021_visionlan} & ICCV 2021 & ResNet+TF$_3$ & 95.8 & 91.7 & 95.7 & 83.7 & 86.0 & 88.5 & 90.23 & 57.7 & 14.2 & 47.8 & 48.0 & 64.0 & 47.9 & 52.1 & 47.39 & 32.8 \\
PARSeq* \cite{BautistaA22PARSeq} & ECCV 2022 & ViT-S & 97.0 & 93.6 & 97.0 & 86.5 & 88.9 & 92.2 & 92.53 & 63.9 & 16.7 & 52.5 & 54.3 & 68.2 & 55.9 & 56.9 & 52.62 & 23.8 \\
MATRN \cite{MATRN} & ECCV 2022 & ResNet+TF$_3$ & 96.6 & 95.0 & 97.9 & 86.6 & 90.6 & 93.5 & 93.37 & 63.1 & 13.4 & 43.8 & 41.9 & 66.4 & 53.2 & 57.0 & 48.40 & 44.2 \\
MGP-STR* \cite{mgpstr} & ECCV 2022 & ViT-B & 96.4 & 94.7 & 97.3 & 87.2 & 91.0 & 90.3 & 92.82 & 55.2 & 14.0 & 52.8 & 48.5 & 65.2 & 48.8 & 59.1 & 49.09 & 148 \\
LevOCR* \cite{levocr} & ECCV 2022 & ResNet+TF$_3$ & 96.6 & 94.4 & 96.7 & 86.5 & 88.8 & 90.6 & 92.27 & 52.8 & 10.7 & 44.8 & 51.9 & 61.3 & 54.0 & 58.1 & 47.66 & 109 \\
CornerTF* \cite{xie2022toward_cornertrans} & ECCV 2022 & CornerEncoder & 95.9 & 94.6 & 97.8 & 86.5 & 91.5 & 92.0 & 93.05 & 62.9 & 18.6 & 56.1 & 58.5 & 68.6 & 59.7 & 61.0 & 55.07 & 86.0 \\
SIGA* \cite{Guan_2023_CVPR_SIGA} & CVPR 2023 & ViT-B & 96.6 & 95.1 & 97.8 & 86.6 & 90.5 & 93.1 & 93.28 & 59.9 & 22.3 & 49.0 & 50.8 & 66.4 & 58.4 & 56.2 & 51.85 & 113 \\
CCD* \cite{Guan_2023_ICCV_CCD} & ICCV 2023 & ViT-B & 97.2 & 94.4 & 97.0 & 87.6 & 91.8 & 93.3 & 93.55 & 66.6 & 24.2 & \textbf{63.9} & 64.8 & 74.8 & 62.4 & 64.0 & 60.10 & 52.0 \\
LISTER* \cite{iccv2023lister} & ICCV 2023 & FocalNet-B & 96.9 & 93.8 & 97.9 & 87.5 & 89.6 & 90.6 & 92.72 & 56.5 & 17.2 & 52.8 & 63.5 & 63.2 & 59.6 & 65.4 & 54.05 & 49.9 \\
LPV-B* \cite{ijcai2023LPV} & IJCAI 2023 & SVTR-B & 97.3 & 94.6 & 97.6 & 87.5 & 90.9 & 94.8 & 93.78 & 68.3 & 21.0 & 59.6 & 65.1 & 76.2 & 63.6 & 62.0 & 59.40 & 35.1 \\
CDistNet* \cite{zheng2024cdistnet} & IJCV 2024 & ResNet+TF$_3$ & 96.4 & 93.5 & 97.4 & 86.0 & 88.7 & 93.4 & 92.57 & 69.3 & 24.4 & 49.8 & 55.6 & 72.8 & 64.3 & 58.5 & 56.38 & 65.5 \\
CAM* \cite{yang2024class_cam} & PR 2024 & ConvNeXtV2-B & 97.4 & \textbf{96.1} & 97.2 & 87.8 & 90.6 & 92.4 & 93.58 & 63.1 & 19.4 & 55.4 & 58.5 & 72.7 & 51.4 & 57.4 & 53.99 & 135 \\
BUSNet \cite{Wei_2024_busnet} & AAAI 2024 & ViT-S & 96.2 & 95.5 & 98.3 & 87.2 & 91.8 & 91.3 & 93.38 & - & - & - & - & - & - & - & - & 56.8 \\
DCTC \cite{Zhang_Lu_Liao_Huang_Li_Wang_Peng_2024_DCTC} & AAAI 2024 & SVTR-L & 96.9 & 93.7 & 97.4 & 87.3 & 88.5 & 92.3 & 92.68 & - & - & - & - & - & - & - & - & 40.8 \\
OTE \cite{Xu_2024_CVPR_OTE} & CVPR 2024 & SVTR-B & 96.4 & 95.5 & 97.4 & 87.2 & 89.6 & 92.4 & 93.08 & - & - & - & - & - & - & - & - & 25.2 \\
CPPD \cite{du2023cppd} & TPAMI 2025 & SVTR-B & 97.6 & 95.5 & 98.2 & 87.9 & 90.9 & 92.7 & 93.80 & 65.5 & 18.6 & 56.0 & 61.9 & 71.0 & 57.5 & 65.8 & 56.63 & 26.8 \\
IGTR-AR \cite{du2024igtr} & TPAMI 2025 & SVTR-B & \textbf{98.2} & 95.7 & \textbf{98.6} & \textbf{88.4} & \textbf{92.4} & 95.5 & \textbf{94.78} & \textbf{78.4} & 31.9 & 61.3 & 66.5 & \textbf{80.2} & 69.3 & \textbf{67.9} & \textbf{65.07} & 24.1 \\
SMTR \cite{du2024smtr} & AAAI 2025 & FocalSVTR & 97.4 & 94.9 & 97.4 & \textbf{88.4} & 89.9 & \textbf{96.2} & 94.02 & 74.2 & 30.6 & 58.5 & 67.6 & 79.6 & \textbf{75.1} & \textbf{67.9} & 64.79 & 15.8 \\
\midrule
CRNN \cite{shi2017crnn} & TPAMI 2016 & ResNet+LSTM & 82.9 & 81.6 & 91.1 & 69.4 & 70.0 & 65.5 & 76.75 & 7.50 & 0.90 & 20.7 & 25.6 & 13.9 & 25.6 & 32.0 & 18.03 & 8.30 \\
SVTR* \cite{duijcai2022svtr} & IJCAI 2022 & SVTR-B & 96.0 & 91.5 & 97.1 & 85.2 & 89.9 & 91.7 & 91.90 & 69.8 & \textbf{37.7} & 47.9 & 61.4 & 66.8 & 44.8 & 61.0 & 55.63 & 24.6 \\
\cellcolor[HTML]{EFEFEF}SVTRv2 & \cellcolor[HTML]{EFEFEF}- & \cellcolor[HTML]{EFEFEF}SVTRv2-B & \cellcolor[HTML]{EFEFEF}97.7 & \cellcolor[HTML]{EFEFEF}94.0 & \cellcolor[HTML]{EFEFEF}97.3 & \cellcolor[HTML]{EFEFEF}88.1 & \cellcolor[HTML]{EFEFEF}91.2 & \cellcolor[HTML]{EFEFEF}95.8 & \cellcolor[HTML]{EFEFEF}94.02 & \cellcolor[HTML]{EFEFEF}74.6 & \cellcolor[HTML]{EFEFEF}25.2 & \cellcolor[HTML]{EFEFEF}57.6 & \cellcolor[HTML]{EFEFEF}\textbf{69.7} & \cellcolor[HTML]{EFEFEF}77.9 & \cellcolor[HTML]{EFEFEF}68.0 & \cellcolor[HTML]{EFEFEF}66.9 & \cellcolor[HTML]{EFEFEF}62.83 & \cellcolor[HTML]{EFEFEF}19.8 \\
\bottomrule
\end{tabular}}
\caption{Results of SVTRv2 and existing models when trained on synthetic datasets (\textit{ST} + \textit{MJ}) \cite{Synthetic,jaderberg14synthetic}. * indicates that the results on \textit{U14M} are evaluated using the publicly released model.}
\label{tab:syn_sota}
\end{table*}


\section{Qualitative Analysis of Recognition Results}

The SVTRv2 model achieved an average accuracy of 96.57\% on \textit{Com} (see Tab.~\ref{tab:sota}). To investigate the underlying causes of the remaining 3.43\% of recognition errors, we conducted a detailed analysis of the misclassified samples, as illustrated in Fig.~\ref{fig:noic15} and Fig.~\ref{fig:ic15}. Previous research has typically categorized \textit{Com} into \textit{regular} and \textit{irregular} text. However, these error samples indicate that the majority of incorrectly recognized text is not irregular. This suggests that, under the current training paradigm using large-scale real-world datasets, a more rigorous manual screening process is warranted for common benchmarks.

\begin{table}[t]\footnotesize
\centering
\setlength{\tabcolsep}{1pt}{\begin{tabular}{c|cccc|c|c}
\toprule
& Blurred & Artistic & Incomplete & Other & Total & Label$_{err}$ \\
\midrule
IIIT5k \cite{IIIT5K} & 0 & 16 & 1 & 4 & 21 & 4 \\
SVT \cite{Wang2011SVT} & 4 & 4 & 4 & 0 & 12 & 0 \\
ICDAR 2013 \cite{icdar2013} & 2 & 2 & 4 & 2 & 10 & 2 \\
ICDAR 2015 \cite{icdar2015} & 48 & 19 & 42 & 13 & 122 & 35 \\
SVTP \cite{SVTP} & 7 & 6 & 12 & 7 & 32 & 4 \\
CUTE80 \cite{Risnumawan2014cute} & 0 & 1 & 0 & 0 & 1 & 1 \\
\midrule
Total & 61 & 48 & 63 & 26 & 198 & 46 \\
& 30.81\% & 24.24\% & 31.82\% & 13.13\% & 100\% & \\
\bottomrule
\end{tabular}}
\caption{Distribution of bad cases for SVTRv2 on \textit{Com}.}
\label{tab:bad_case}
\end{table}

Based on this one-by-one manual inspection, we identified five primary causes of recognition errors: (1) blurred text, (2) artistic text, (3) incomplete text, (4) others, and (5) image text labeling errors (Label$_{err}$). Specifically, the blurred text category includes issues such as low resolution, motion blur, or extreme lighting conditions. The artistic text category refers to unconventional fonts, commonly found in business signage, as well as some handwritten text. Incomplete text arises when characters are obscured by objects or lost due to improper cropping, requiring contextual inference. Image text labeling errors occur when the given text labels contain inaccuracies or include characters with phonetic symbols. As shown in Tab.~\ref{tab:bad_case}, after excluding samples affected by labeling inconsistencies, the remaining recognition errors primarily stemmed from blurred (30.81\%), artistic (24.24\%), and incomplete text (31.82\%). This result highlights that SVTRv2's recognition performance needs further improvement, particularly in handling complex scenarios involving these challenging text types.

\section{Standardized Model Training Settings}

The optimal hyperparameters for training different models vary and are not universally fixed. However, key factors such as training epochs, data augmentations, input size, and evaluation protocols significantly influence model accuracy. To ensure fair and unbiased performance comparisons, we standardize these factors across all models, as outlined in Tab.~\ref{tab:setting}. This uniform training and evaluation framework ensures consistency while allowing each model to approach its best accuracy. To maximize fairness, we conducted extensive hyperparameter tuning for model-specific settings, including the optimizer, learning rate, and regularization strategies. This rigorous optimization led to significant accuracy improvements of 5–10\% for most models compared to their default configurations. For instance, MAERec's accuracy increased from 78.6\% to 85.2\%, demonstrating the effectiveness of these training settings. These improvements underscore the reliability of our results and highlight the importance of carefully optimizing hyperparameters for meaningful model comparisons.

\begin{table*}
\centering
\begin{tabular}{c|p{10cm}}
\toprule
\textbf{Setting} & \textbf{Detail} \\
\midrule
\textbf{Training Set} &
For training, when the text length of a text image exceeds 25, a sample with text length $\leq 25$ is randomly selected from the training set instead, so that models are only exposed to short text (length $\leq 25$). \\
\midrule
\textbf{Test Sets} &
For all test sets except the long-text test set (\textit{LTB}), text images with text length $> 25$ are filtered out. Text length is calculated by removing spaces and non-94-character-set special characters. \\
\midrule
\textbf{Input Size} &
Unless a method explicitly requires a dynamic size, models use a fixed input size of $32\times128$. If a model performs incorrectly with $32\times128$ during training, the original size is used. The test input size matches the training size. \\
\midrule
\textbf{Data Augmentation} &
All models use the data augmentation strategy employed by PARSeq. \\
\midrule
\textbf{Training Epochs} &
Unless pre-training is required, all models are trained for 20 epochs. \\
\midrule
\textbf{Optimizer} &
AdamW is the default optimizer. If training fails to converge with AdamW, Adam or other optimizers are used. \\
\midrule
\textbf{Batch Size} &
The maximum batch size for all models is 1024. If single-GPU training is not feasible, 2 GPUs (512 per GPU) or 4 GPUs (256 per GPU) are used. If 4-GPU training runs out of memory, the batch size is halved, and the learning rate is adjusted accordingly. \\
\midrule
\textbf{Learning Rate} &
The default learning rate for batch size 1024 is 0.00065. The learning rate is adjusted multiple times to achieve the best results. \\
\midrule
\textbf{Learning Rate Scheduler} &
A linear warm-up for 1.5 epochs is followed by a OneCycle scheduler. \\
\midrule
\textbf{Weight Decay} &
The default weight decay is 0.05. NormLayer and Bias parameters have a weight decay of 0. \\
\midrule
\textbf{EMA or Similar Tricks} &
No EMA or similar tricks are used for any model. \\
\midrule
\textbf{Evaluation Protocols} &
Word accuracy is evaluated after filtering special characters and converting all text to lowercase. \\
\bottomrule
\end{tabular}

\caption{The uniform training and evaluation settings, designed to maintain consistency across all models while enabling each model to achieve its best possible accuracy.}
\label{tab:setting}
\end{table*}
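The length and evaluation rules in the table above (count only characters from the 94-character set, keep samples with text length at most 25 for the short-text benchmarks, and lowercase after dropping special characters before computing word accuracy) can be sketched in Python; the exact character set and helper names are assumptions based on the table's description:

import string

# Assumed 94-character set: digits, upper/lower-case letters, and ASCII punctuation.
CHARSET_94 = set(string.digits + string.ascii_letters + string.punctuation)

def text_length(label: str) -> int:
    # Spaces and characters outside the 94-character set are not counted.
    return sum(1 for ch in label if ch in CHARSET_94)

def keep_for_short_text_benchmarks(label: str, max_len: int = 25) -> bool:
    # All test sets except LTB keep only samples with text length <= 25.
    return text_length(label) <= max_len

def normalize_for_word_accuracy(text: str) -> str:
    # Filter special characters and lowercase before comparing prediction and label.
    return "".join(ch for ch in text if ch in CHARSET_94 and ch.isalnum()).lower()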
+
|
| 347 |
+
|
| 348 |
+
\begin{figure*}[t]
|
| 349 |
+
\centering
|
| 350 |
+
\includegraphics[width=0.98\textwidth]{svtrv2_noic15_6_h_fig1.pdf}
|
| 351 |
+
\caption{The bad cases of SVTRv2 in IIIT5k \cite{IIIT5K}, SVT \cite{Wang2011SVT}, ICDAR 2013 \cite{icdar2013}, SVTP \cite{SVTP} and CUTE80 \cite{Risnumawan2014cute}. Labels, the predicted result, and the predicted score are denoted as \textcolor{blue}{Text$_{label}$} $|$ Text$_{pred}~|~$ Score$_{pred}$. Yellow, red, blue, and green boxes indicate blurred, artistic fonts, incomplete text, and label-inconsistent samples, respectively. Other samples have no box.}
|
| 352 |
+
\label{fig:noic15}
|
| 353 |
+
\end{figure*}
|
| 354 |
+
|
| 355 |
+
\begin{figure*}[t]
|
| 356 |
+
\centering
|
| 357 |
+
\includegraphics[width=0.98\textwidth]{svtrv2_ic15_6_h_fig.pdf}
|
| 358 |
+
\caption{The bad cases of SVTRv2 in ICDAR 2015 \cite{icdar2015}. Labels, the predicted result, and the predicted score are denoted as \textcolor{blue}{Text$_{label}$} $|$ Text$_{pred}~|~$ Score$_{pred}$. Yellow, red, blue, and green boxes indicate blurred, artistic fonts, incomplete text, and label-inconsistent samples, respectively. Other samples have no box.}
|
| 359 |
+
\label{fig:ic15}
|
| 360 |
+
\end{figure*}
|
| 361 |
+
|
| 362 |
+
|
| 363 |
+
|
| 364 |
+
|
SVTRv2: CTC Beats Encoder-Decoder Models in Scene Text Recognition/data_wenshi.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7ef454db6444c7fcc3b9d91595b7d70ee7f893f2ef9ddbd6582c890f5a7837ee
size 70713
SVTRv2: CTC Beats Encoder-Decoder Models in Scene Text Recognition/iccv.sty
ADDED
@@ -0,0 +1,508 @@
% ---------------------------------------------------------------
%
% No guarantee is given that the format corresponds perfectly to
% IEEE 8.5" x 11" Proceedings, but most features should be ok.
%
% ---------------------------------------------------------------
% with LaTeX2e:
% =============
%
% use as
%   \documentclass[times,10pt,twocolumn]{article}
%   \usepackage[options]{ICCV}
%   \usepackage{times}
%
% "options" should be replaced by
% * "review" for submitting a paper for review,
% * "final" for the camera ready, and
% * "rebuttal" for the author rebuttal.
%
% specify references as
%   {\small
%   \bibliographystyle{ieee}
%   \bibliography{...your files...}
%   }
% ---------------------------------------------------------------

\NeedsTeXFormat{LaTeX2e}[1999/12/01]
\ProvidesPackage{iccv}[2025 LaTeX class for IEEE ICCV]

\RequirePackage{times} % Integrate Times for here
\RequirePackage{xspace}
\RequirePackage[dvipsnames]{xcolor}
\RequirePackage{graphicx}
\RequirePackage{amsmath}
\RequirePackage{amssymb}
\RequirePackage{booktabs}
\RequirePackage[numbers,sort&compress]{natbib}
\setlength{\bibsep}{1pt plus 1pt minus 1pt}

\RequirePackage{silence} % Suppress unwanted warnings
\hbadness=10000 \vbadness=10000 \vfuzz=30pt \hfuzz=30pt
\WarningFilter{latexfont}{Font shape declaration}
\WarningFilter{latex}{Font shape}
\WarningFilter[rebuttal]{latex}{No \author given}
\RequirePackage{etoolbox}

% Use modern caption package to allow for sub-figures etc.
% Reproduces the original CVPR/ICCV style as closely as possible.
\RequirePackage[format=plain,labelformat=simple,labelsep=period,font=small,compatibility=false]{caption}
\RequirePackage[font=footnotesize,skip=3pt,subrefformat=parens]{subcaption}

\newtoggle{iccvfinal} % Camera-ready version
\newtoggle{iccvrebuttal} % Rebuttal
\newtoggle{iccvpagenumbers} % Force page numbers (in camera ready)
\toggletrue{iccvfinal}
\togglefalse{iccvrebuttal}
\togglefalse{iccvpagenumbers}
\DeclareOption{review}{\togglefalse{iccvfinal}\toggletrue{iccvpagenumbers}}
\DeclareOption{rebuttal}{\togglefalse{iccvfinal}\toggletrue{iccvrebuttal}}
\DeclareOption{pagenumbers}{\toggletrue{iccvpagenumbers}}
\DeclareOption*{\PackageWarning{iccv}{Unkown option `\CurrentOption'}}
\ProcessOptions\relax

% Don't warn about missing author for rebuttal
\iftoggle{iccvrebuttal}{%
\ActivateWarningFilters[rebuttal]
}{}

% Breaking lines for URLs in the bib
\RequirePackage[hyphens]{url}
\Urlmuskip=0mu plus 1mu\relax

% ---------------------------------------------------------------
% Inlined version of the obsolete "everyshi-2001-05-15" package.
\newcommand{\@EveryShipout@Hook}{}
\newcommand{\@EveryShipout@AtNextHook}{}
\newcommand*{\EveryShipout}[1]
{\g@addto@macro\@EveryShipout@Hook{#1}}
\newcommand*{\AtNextShipout}[1]
{\g@addto@macro\@EveryShipout@AtNextHook{#1}}
\newcommand{\@EveryShipout@Shipout}{%
\afterassignment\@EveryShipout@Test
\global\setbox\@cclv= %
}
\newcommand{\@EveryShipout@Test}{%
\ifvoid\@cclv\relax
\aftergroup\@EveryShipout@Output
\else
\@EveryShipout@Output
\fi%
}
\newcommand{\@EveryShipout@Output}{%
\@EveryShipout@Hook%
\@EveryShipout@AtNextHook%
\gdef\@EveryShipout@AtNextHook{}%
\@EveryShipout@Org@Shipout\box\@cclv%
}
\newcommand{\@EveryShipout@Org@Shipout}{}
\newcommand*{\@EveryShipout@Init}{%
\message{ABD: EveryShipout initializing macros}%
\let\@EveryShipout@Org@Shipout\shipout
\let\shipout\@EveryShipout@Shipout
}
\AtBeginDocument{\@EveryShipout@Init}
% ---------------------------------------------------------------

% ---------------------------------------------------------------
% Inlined simplified version of the "eso-pic" package.
\newcommand\LenToUnit[1]{#1\@gobble}
\newcommand\AtPageUpperLeft[1]{%
\begingroup
\@tempdima=0pt\relax\@tempdimb=\ESO@yoffsetI\relax
\put(\LenToUnit{\@tempdima},\LenToUnit{\@tempdimb}){#1}%
\endgroup
}
\newcommand\AtPageLowerLeft[1]{\AtPageUpperLeft{%
\put(0,\LenToUnit{-\paperheight}){#1}}}
\newcommand\AtPageCenter[1]{\AtPageUpperLeft{%
\put(\LenToUnit{.5\paperwidth},\LenToUnit{-.5\paperheight}){#1}}%
}
\newcommand\AtTextUpperLeft[1]{%
\begingroup
\setlength\@tempdima{1in}%
\ifodd\c@page%
\advance\@tempdima\oddsidemargin%
\else%
\advance\@tempdima\evensidemargin%
\fi%
\@tempdimb=\ESO@yoffsetI\relax\advance\@tempdimb-1in\relax%
\advance\@tempdimb-\topmargin%
\advance\@tempdimb-\headheight\advance\@tempdimb-\headsep%
\put(\LenToUnit{\@tempdima},\LenToUnit{\@tempdimb}){#1}%
\endgroup
}
\newcommand\AtTextLowerLeft[1]{\AtTextUpperLeft{%
\put(0,\LenToUnit{-\textheight}){#1}}}
\newcommand\AtTextCenter[1]{\AtTextUpperLeft{%
\put(\LenToUnit{.5\textwidth},\LenToUnit{-.5\textheight}){#1}}}
\newcommand{\ESO@HookI}{} \newcommand{\ESO@HookII}{}
\newcommand{\ESO@HookIII}{}
\newcommand{\AddToShipoutPicture}{%
\@ifstar{\g@addto@macro\ESO@HookII}{\g@addto@macro\ESO@HookI}}
\newcommand{\ClearShipoutPicture}{\global\let\ESO@HookI\@empty}
\newcommand\ESO@isMEMOIR[1]{}
\@ifclassloaded{memoir}{\renewcommand\ESO@isMEMOIR[1]{#1}}{}
\newcommand{\@ShipoutPicture}{%
\bgroup
\@tempswafalse%
\ifx\ESO@HookI\@empty\else\@tempswatrue\fi%
\ifx\ESO@HookII\@empty\else\@tempswatrue\fi%
\ifx\ESO@HookIII\@empty\else\@tempswatrue\fi%
\if@tempswa%
\@tempdima=1in\@tempdimb=-\@tempdima%
\advance\@tempdimb\ESO@yoffsetI%
\ESO@isMEMOIR{%
\advance\@tempdima\trimedge%
\advance\@tempdima\paperwidth%
\advance\@tempdima-\stockwidth%
\if@twoside\ifodd\c@page\else%
\advance\@tempdima-2\trimedge%
\advance\@tempdima-\paperwidth%
\advance\@tempdima\stockwidth%
\fi\fi%
\advance\@tempdimb\trimtop}%
\unitlength=1pt%
\global\setbox\@cclv\vbox{%
\vbox{\let\protect\relax
\pictur@(0,0)(\strip@pt\@tempdima,\strip@pt\@tempdimb)%
\ESO@HookIII\ESO@HookI\ESO@HookII%
\global\let\ESO@HookII\@empty%
\endpicture}%
\nointerlineskip%
\box\@cclv}%
\fi
\egroup
}
\EveryShipout{\@ShipoutPicture}
\RequirePackage{keyval}
\newif\ifESO@dvips\ESO@dvipsfalse
\newif\ifESO@texcoord\ESO@texcoordfalse

\AtBeginDocument{%
\IfFileExists{color.sty}
{%
\RequirePackage{color}
\let\ESO@color=\color\let\ESO@colorbox=\colorbox
\let\ESO@fcolorbox=\fcolorbox
}{}
\@ifundefined{Gin@driver}{}%
{%
\ifx\Gin@driver\@empty\else%
\filename@parse{\Gin@driver}\def\reserved@a{dvips}%
\ifx\filename@base\reserved@a\ESO@dvipstrue\fi%
\fi
}%
\ifx\pdfoutput\undefined\else
\ifx\pdfoutput\relax\else
\ifcase\pdfoutput\else
\ESO@dvipsfalse%
\fi
\fi
\fi
}
\ifESO@texcoord
\def\ESO@yoffsetI{0pt}\def\ESO@yoffsetII{-\paperheight}
\else
\def\ESO@yoffsetI{\paperheight}\def\ESO@yoffsetII{0pt}
\fi
% ---------------------------------------------------------------

\typeout{ICCV 8.5 x 11-Inch Proceedings Style `iccv.sty'.}

% ten point helvetica bold required for captions
% eleven point times bold required for second-order headings
% in some sites the name of the fonts may differ,
% change the name here:
\font\iccvtenhv = phvb at 8pt % *** IF THIS FAILS, SEE iccv.sty ***
\font\elvbf = ptmb scaled 1100
\font\tenbf = ptmb scaled 1000

% If the above lines give an error message, try to comment them and
% uncomment these:
%\font\iccvtenhv = phvb7t at 8pt
%\font\elvbf = ptmb7t scaled 1100
%\font\tenbf = ptmb7t scaled 1000

% set dimensions of columns, gap between columns, and paragraph indent
\setlength{\textheight}{8.875in}
\setlength{\textwidth}{6.875in}
\setlength{\columnsep}{0.3125in}
\setlength{\topmargin}{0in}
\setlength{\headheight}{0in}
\setlength{\headsep}{0in}
\setlength{\parindent}{1pc}
\setlength{\oddsidemargin}{-0.1875in}
\setlength{\evensidemargin}{-0.1875in}

% Suppress page numbers when the appropriate option is given
\iftoggle{iccvpagenumbers}{}{%
\pagestyle{empty}
}

\AtBeginDocument{%
% Print an error if document class other than article is used
\@ifclassloaded{article}{}{%
\PackageError{iccv}{Package only meant to be used with document class `article'}{Change document class to `article'.}
}
% Print a warning if incorrect options for article are specified
\@ifclasswith{article}{10pt}{}{%
\PackageWarningNoLine{iccv}{Incorrect font size specified - ICCV requires 10-point fonts. Please load document class `article' with `10pt' option}
}
\@ifclasswith{article}{twocolumn}{}{%
\PackageWarningNoLine{iccv}{Single column document - ICCV requires papers to have two-column layout. Please load document class `article' with `twocolumn' option}
}
\@ifclasswith{article}{letterpaper}{}{%
\PackageWarningNoLine{iccv}{Incorrect paper size - ICCV uses paper size `letter'. Please load document class `article' with `letterpaper' option}
}
% Print a warning if hyperref is not loaded and/or if the pagebackref option is missing
\iftoggle{iccvfinal}{%
\@ifpackageloaded{hyperref}{}{%
\PackageWarningNoLine{iccv}{Package `hyperref' is not loaded, but highly recommended for camera-ready version}
}
}{%
\@ifpackageloaded{hyperref}{
\@ifpackagewith{hyperref}{pagebackref}{}{
\PackageWarningNoLine{iccv}{Package `hyperref' is not loaded with option `pagebackref', which is strongly recommended for review version}
}
}{%
\PackageWarningNoLine{iccv}{Package `hyperref' is not loaded, but strongly recommended for review version}
}
}
}

\def\@maketitle{
\newpage
\null
\iftoggle{iccvrebuttal}{\vspace*{-.3in}}{\vskip .375in}
\begin{center}
% smaller title font only for rebuttal
\iftoggle{iccvrebuttal}{{\large \bf \@title \par}}{{\Large \bf \@title \par}}
% additional two empty lines at the end of the title
\iftoggle{iccvrebuttal}{\vspace*{-22pt}}{\vspace*{24pt}}{
\large
\lineskip .5em
\begin{tabular}[t]{c}
\iftoggle{iccvfinal}{
\@author
}{
\iftoggle{iccvrebuttal}{}{
Anonymous \confName~submission\\
\vspace*{1pt}\\
Paper ID \paperID
}
}
\end{tabular}
\par
}
% additional small space at the end of the author name
\vskip .5em
% additional empty line at the end of the title block
\vspace*{12pt}
\end{center}
}

\def\abstract{%
% Suppress page numbers when the appropriate option is given
\iftoggle{iccvpagenumbers}{}{%
\thispagestyle{empty}
}
\centerline{\large\bf Abstract}%
\vspace*{12pt}\noindent%
\it\ignorespaces%
}

\def\endabstract{%
% additional empty line at the end of the abstract
\vspace*{12pt}
}

\def\affiliation#1{\gdef\@affiliation{#1}} \gdef\@affiliation{}

% correct heading spacing and type
\def\iccvsection{\@startsection {section}{1}{\z@}
{-10pt plus -2pt minus -2pt}{7pt} {\large\bf}}
\def\iccvssect#1{\iccvsection*{#1}}
\def\iccvsect#1{\iccvsection{\texorpdfstring{\hskip -1em.~}{}#1}}
\def\section{\@ifstar\iccvssect\iccvsect}

\def\iccvsubsection{\@startsection {subsection}{2}{\z@}
{-8pt plus -2pt minus -2pt}{5pt} {\elvbf}}
\def\iccvssubsect#1{\iccvsubsection*{#1}}
\def\iccvsubsect#1{\iccvsubsection{\texorpdfstring{\hskip -1em.~}{}#1}}
\def\subsection{\@ifstar\iccvssubsect\iccvsubsect}

\def\iccvsubsubsection{\@startsection {subsubsection}{3}{\z@}
{-6pt plus -2pt minus -2pt}{3pt} {\tenbf}}
\def\iccvssubsubsect#1{\iccvsubsubsection*{#1}}
\def\iccvsubsubsect#1{\iccvsubsubsection{\texorpdfstring{\hskip -1em.~}{}#1}}
\def\subsubsection{\@ifstar\iccvssubsubsect\iccvsubsubsect}

%% --------- Page background marks: Ruler and confidentiality (only for review and rebuttal)
\iftoggle{iccvfinal}{
% In review and rebuttal mode, we use the "lineno" package for numbering lines.
% When switching to a different mode, the "\@LN" macro may remain in cached .aux files,
% leading to build errors (https://github.com/cvpr-org/author-kit/issues/49).
% Defining the macro as empty fixes that (https://tex.stackexchange.com/a/125779).
\makeatletter
\providecommand{\@LN}[2]{}
\makeatother
}{
% ----- define vruler
\makeatletter
\newbox\iccvrulerbox
\newcount\iccvrulercount
\newdimen\iccvruleroffset
\newdimen\cv@lineheight
\newdimen\cv@boxheight
\newbox\cv@tmpbox
\newcount\cv@refno
\newcount\cv@tot
% NUMBER with left flushed zeros \fillzeros[<WIDTH>]<NUMBER>
\newcount\cv@tmpc@ \newcount\cv@tmpc
\def\fillzeros[#1]#2{\cv@tmpc@=#2\relax\ifnum\cv@tmpc@<0\cv@tmpc@=-\cv@tmpc@\fi
\cv@tmpc=1 %
\loop\ifnum\cv@tmpc@<10 \else \divide\cv@tmpc@ by 10 \advance\cv@tmpc by 1 \fi
\ifnum\cv@tmpc@=10\relax\cv@tmpc@=11\relax\fi \ifnum\cv@tmpc@>10 \repeat
\ifnum#2<0\advance\cv@tmpc1\relax-\fi
\loop\ifnum\cv@tmpc<#1\relax0\advance\cv@tmpc1\relax\fi \ifnum\cv@tmpc<#1 \repeat
\cv@tmpc@=#2\relax\ifnum\cv@tmpc@<0\cv@tmpc@=-\cv@tmpc@\fi \relax\the\cv@tmpc@}%
+
\makeatother
|
| 376 |
+
% ----- end of vruler
|
| 377 |
+
|
| 378 |
+
%% Define linenumber setup
|
| 379 |
+
\RequirePackage[switch,mathlines]{lineno}
|
| 380 |
+
|
| 381 |
+
% Line numbers in ICCV blue using font from \iccvtenhv
|
| 382 |
+
\renewcommand\linenumberfont{\iccvtenhv\color[rgb]{.5,.5,1}}
|
| 383 |
+
|
| 384 |
+
\renewcommand\thelinenumber{\fillzeros[3]{\arabic{linenumber}}}
|
| 385 |
+
|
| 386 |
+
\setlength{\linenumbersep}{.75cm}
|
| 387 |
+
|
| 388 |
+
% Bug: An equation with $$ ... $$ isn't numbered, nor is the previous line.
|
| 389 |
+
|
| 390 |
+
% Patch amsmath commands so that the previous line and the equation itself
|
| 391 |
+
% are numbered. Bug: multiline has an extra line number.
|
| 392 |
+
% https://tex.stackexchange.com/questions/461186/how-to-use-lineno-with-amsmath-align
|
| 393 |
+
\RequirePackage{etoolbox} %% <- for \pretocmd, \apptocmd and \patchcmd
|
| 394 |
+
|
| 395 |
+
\newcommand*\linenomathpatch[1]{%
|
| 396 |
+
\expandafter\pretocmd\csname #1\endcsname {\linenomath}{}{}%
|
| 397 |
+
\expandafter\pretocmd\csname #1*\endcsname {\linenomath}{}{}%
|
| 398 |
+
\expandafter\apptocmd\csname end#1\endcsname {\endlinenomath}{}{}%
|
| 399 |
+
\expandafter\apptocmd\csname end#1*\endcsname {\endlinenomath}{}{}%
|
| 400 |
+
}
|
| 401 |
+
\newcommand*\linenomathpatchAMS[1]{%
|
| 402 |
+
\expandafter\pretocmd\csname #1\endcsname {\linenomathAMS}{}{}%
|
| 403 |
+
\expandafter\pretocmd\csname #1*\endcsname {\linenomathAMS}{}{}%
|
| 404 |
+
\expandafter\apptocmd\csname end#1\endcsname {\endlinenomath}{}{}%
|
| 405 |
+
\expandafter\apptocmd\csname end#1*\endcsname {\endlinenomath}{}{}%
|
| 406 |
+
}
|
| 407 |
+
|
| 408 |
+
%% Definition of \linenomathAMS depends on whether the mathlines option is provided
|
| 409 |
+
\expandafter\ifx\linenomath\linenomathWithnumbers
|
| 410 |
+
\let\linenomathAMS\linenomathWithnumbers
|
| 411 |
+
%% The following line gets rid of an extra line numbers at the bottom:
|
| 412 |
+
\patchcmd\linenomathAMS{\advance\postdisplaypenalty\linenopenalty}{}{}{}
|
| 413 |
+
\else
|
| 414 |
+
\let\linenomathAMS\linenomathNonumbers
|
| 415 |
+
\fi
|
| 416 |
+
|
| 417 |
+
% Add the numbers
|
| 418 |
+
\linenumbers
|
| 419 |
+
\AtBeginDocument{%
|
| 420 |
+
\linenomathpatch{equation}%
|
| 421 |
+
\linenomathpatchAMS{gather}%
|
| 422 |
+
\linenomathpatchAMS{multline}%
|
| 423 |
+
\linenomathpatchAMS{align}%
|
| 424 |
+
\linenomathpatchAMS{alignat}%
|
| 425 |
+
\linenomathpatchAMS{flalign}%
|
| 426 |
+
}
|
| 427 |
+
|
| 428 |
+
% \makevruler[<SCALE>][<INITIAL_COUNT>][<STEP>][<DIGITS>][<HEIGHT>]
|
| 429 |
+
\def\iccvruler#1{\makevruler[12pt][#1][1][3][0.993\textheight]\usebox{\iccvrulerbox}}
|
| 430 |
+
\AddToShipoutPicture{%
|
| 431 |
+
\color[rgb]{.5,.5,1}
|
| 432 |
+
|
| 433 |
+
\def\pid{\parbox{1in}{\begin{center}\bf\sf{\small \confName}\\\small \#\paperID\end{center}}}
|
| 434 |
+
\AtTextUpperLeft{%paperID in corners
|
| 435 |
+
\put(\LenToUnit{-65pt},\LenToUnit{45pt}){\pid}
|
| 436 |
+
\put(\LenToUnit{\textwidth-12pt},\LenToUnit{45pt}){\pid}
|
| 437 |
+
}
|
| 438 |
+
\AtTextUpperLeft{%confidential
|
| 439 |
+
\put(0,\LenToUnit{1cm}){\parbox{\textwidth}{\centering\iccvtenhv
|
| 440 |
+
\confName~\confYear~Submission \#\paperID. CONFIDENTIAL REVIEW COPY. DO NOT DISTRIBUTE.}}
|
| 441 |
+
}
|
| 442 |
+
}
|
| 443 |
+
} % end of not iccvfinal
|
| 444 |
+
|
| 445 |
+
%%% Make figure placement a little more predictable.
|
| 446 |
+
% We trust the user to move figures if this results
|
| 447 |
+
% in ugliness.
|
| 448 |
+
% Minimize bad page breaks at figures
|
| 449 |
+
\renewcommand{\textfraction}{0.01}
|
| 450 |
+
\renewcommand{\floatpagefraction}{0.99}
|
| 451 |
+
\renewcommand{\topfraction}{0.99}
|
| 452 |
+
\renewcommand{\bottomfraction}{0.99}
|
| 453 |
+
\renewcommand{\dblfloatpagefraction}{0.99}
|
| 454 |
+
\renewcommand{\dbltopfraction}{0.99}
|
| 455 |
+
\setcounter{totalnumber}{99}
|
| 456 |
+
\setcounter{topnumber}{99}
|
| 457 |
+
\setcounter{bottomnumber}{99}
|
| 458 |
+
|
| 459 |
+
% Add a period to the end of an abbreviation unless there's one
|
| 460 |
+
% already, then \xspace.
|
| 461 |
+
\makeatletter
|
| 462 |
+
\DeclareRobustCommand\onedot{\futurelet\@let@token\@onedot}
|
| 463 |
+
\def\@onedot{\ifx\@let@token.\else.\null\fi\xspace}
|
| 464 |
+
|
| 465 |
+
\def\eg{\emph{e.g}\onedot} \def\Eg{\emph{E.g}\onedot}
|
| 466 |
+
\def\ie{\emph{i.e}\onedot} \def\Ie{\emph{I.e}\onedot}
|
| 467 |
+
\def\cf{\emph{cf}\onedot} \def\Cf{\emph{Cf}\onedot}
|
| 468 |
+
\def\etc{\emph{etc}\onedot} \def\vs{\emph{vs}\onedot}
|
| 469 |
+
\def\wrt{w.r.t\onedot} \def\dof{d.o.f\onedot}
|
| 470 |
+
\def\iid{i.i.d\onedot} \def\wolog{w.l.o.g\onedot}
|
| 471 |
+
\def\etal{\emph{et al}\onedot}
|
| 472 |
+
\makeatother
|
| 473 |
+
|
| 474 |
+
% ---------------------------------------------------------------
|
| 475 |
+
|
| 476 |
+
%% redefine the \title command so that a variable name is saved in \thetitle, and provides the \maketitlesupplementary command
|
| 477 |
+
\let\titleold\title
|
| 478 |
+
\renewcommand{\title}[1]{\titleold{#1}\newcommand{\thetitle}{#1}}
|
| 479 |
+
\def\maketitlesupplementary
|
| 480 |
+
{
|
| 481 |
+
\newpage
|
| 482 |
+
\twocolumn[
|
| 483 |
+
\centering
|
| 484 |
+
\Large
|
| 485 |
+
\textbf{\thetitle}\\
|
| 486 |
+
\vspace{0.5em}Supplementary Material \\
|
| 487 |
+
\vspace{1.0em}
|
| 488 |
+
] %< twocolumn
|
| 489 |
+
}
|
| 490 |
+
|
| 491 |
+
% ---------------------------------------------------------------
|
| 492 |
+
|
| 493 |
+
%% Support for easy cross-referencing (e.g. \cref{sec:intro}
|
| 494 |
+
% configured with \AtEndPreamble as it needs to be called after hyperref
|
| 495 |
+
\AtEndPreamble{
|
| 496 |
+
\usepackage[capitalize]{cleveref}
|
| 497 |
+
\crefname{section}{Sec.}{Secs.}
|
| 498 |
+
\Crefname{section}{Section}{Sections}
|
| 499 |
+
\Crefname{table}{Table}{Tables}
|
| 500 |
+
\crefname{table}{Tab.}{Tabs.}
|
| 501 |
+
}
|
| 502 |
+
|
| 503 |
+
% ---------------------------------------------------------------
|
| 504 |
+
|
| 505 |
+
%% More compact compact itemize/enumeration (e.g. list contributions)
|
| 506 |
+
\RequirePackage[shortlabels,inline]{enumitem}
|
| 507 |
+
\setlist[itemize]{noitemsep,leftmargin=*,topsep=0em}
|
| 508 |
+
\setlist[enumerate]{noitemsep,leftmargin=*,topsep=0em}
|
SVTRv2: CTC Beats Encoder-Decoder Models in Scene Text Recognition/ieeenat_fullname.bst
ADDED
|
@@ -0,0 +1,1448 @@
| 1 |
+
%% File: `abbrvnat.bst'
|
| 2 |
+
%% A modification of `abbrv.bst' for use with natbib package
|
| 3 |
+
%%
|
| 4 |
+
%% Copyright 1993-2007 Patrick W Daly
|
| 5 |
+
%% Max-Planck-Institut f\"ur Sonnensystemforschung
|
| 6 |
+
%% Max-Planck-Str. 2
|
| 7 |
+
%% D-37191 Katlenburg-Lindau
|
| 8 |
+
%% Germany
|
| 9 |
+
%% E-mail: daly@mps.mpg.de
|
| 10 |
+
%%
|
| 11 |
+
%% This program can be redistributed and/or modified under the terms
|
| 12 |
+
%% of the LaTeX Project Public License Distributed from CTAN
|
| 13 |
+
%% archives in directory macros/latex/base/lppl.txt; either
|
| 14 |
+
%% version 1 of the License, or any later version.
|
| 15 |
+
%%
|
| 16 |
+
% Version and source file information:
|
| 17 |
+
% \ProvidesFile{natbst.mbs}[2007/11/26 1.93 (PWD)]
|
| 18 |
+
%
|
| 19 |
+
% BibTeX `plainnat' family
|
| 20 |
+
% version 0.99b for BibTeX versions 0.99a or later,
|
| 21 |
+
% for LaTeX versions 2.09 and 2e.
|
| 22 |
+
%
|
| 23 |
+
% For use with the `natbib.sty' package; emulates the corresponding
|
| 24 |
+
% member of the `plain' family, but with author-year citations.
|
| 25 |
+
%
|
| 26 |
+
% With version 6.0 of `natbib.sty', it may also be used for numerical
|
| 27 |
+
% citations, while retaining the commands \citeauthor, \citefullauthor,
|
| 28 |
+
% and \citeyear to print the corresponding information.
|
| 29 |
+
%
|
| 30 |
+
% For version 7.0 of `natbib.sty', the KEY field replaces missing
|
| 31 |
+
% authors/editors, and the date is left blank in \bibitem.
|
| 32 |
+
%
|
| 33 |
+
% Includes field EID for the sequence/citation number of electronic journals
|
| 34 |
+
% which is used instead of page numbers.
|
| 35 |
+
%
|
| 36 |
+
% Includes fields ISBN and ISSN.
|
| 37 |
+
%
|
| 38 |
+
% Includes field URL for Internet addresses.
|
| 39 |
+
%
|
| 40 |
+
% Includes field DOI for Digital Object Identifiers.
|
| 41 |
+
%
|
| 42 |
+
% Works best with the url.sty package of Donald Arseneau.
|
| 43 |
+
%
|
| 44 |
+
% Works with identical authors and year are further sorted by
|
| 45 |
+
% citation key, to preserve any natural sequence.
|
| 46 |
+
%
|
| 47 |
+
ENTRY
|
| 48 |
+
{ address
|
| 49 |
+
author
|
| 50 |
+
booktitle
|
| 51 |
+
chapter
|
| 52 |
+
doi
|
| 53 |
+
eid
|
| 54 |
+
edition
|
| 55 |
+
editor
|
| 56 |
+
howpublished
|
| 57 |
+
institution
|
| 58 |
+
isbn
|
| 59 |
+
issn
|
| 60 |
+
journal
|
| 61 |
+
key
|
| 62 |
+
month
|
| 63 |
+
note
|
| 64 |
+
number
|
| 65 |
+
organization
|
| 66 |
+
pages
|
| 67 |
+
publisher
|
| 68 |
+
school
|
| 69 |
+
series
|
| 70 |
+
title
|
| 71 |
+
type
|
| 72 |
+
url
|
| 73 |
+
volume
|
| 74 |
+
year
|
| 75 |
+
}
|
| 76 |
+
{}
|
| 77 |
+
{ label extra.label sort.label short.list }
|
| 78 |
+
|
| 79 |
+
INTEGERS { output.state before.all mid.sentence after.sentence after.block }
|
| 80 |
+
|
| 81 |
+
FUNCTION {init.state.consts}
|
| 82 |
+
{ #0 'before.all :=
|
| 83 |
+
#1 'mid.sentence :=
|
| 84 |
+
#2 'after.sentence :=
|
| 85 |
+
#3 'after.block :=
|
| 86 |
+
}
|
| 87 |
+
|
| 88 |
+
STRINGS { s t }
|
| 89 |
+
|
| 90 |
+
FUNCTION {output.nonnull}
|
| 91 |
+
{ 's :=
|
| 92 |
+
output.state mid.sentence =
|
| 93 |
+
{ ", " * write$ }
|
| 94 |
+
{ output.state after.block =
|
| 95 |
+
{ add.period$ write$
|
| 96 |
+
newline$
|
| 97 |
+
"\newblock " write$
|
| 98 |
+
}
|
| 99 |
+
{ output.state before.all =
|
| 100 |
+
'write$
|
| 101 |
+
{ add.period$ " " * write$ }
|
| 102 |
+
if$
|
| 103 |
+
}
|
| 104 |
+
if$
|
| 105 |
+
mid.sentence 'output.state :=
|
| 106 |
+
}
|
| 107 |
+
if$
|
| 108 |
+
s
|
| 109 |
+
}
|
| 110 |
+
|
| 111 |
+
FUNCTION {output}
|
| 112 |
+
{ duplicate$ empty$
|
| 113 |
+
'pop$
|
| 114 |
+
'output.nonnull
|
| 115 |
+
if$
|
| 116 |
+
}
|
| 117 |
+
|
| 118 |
+
FUNCTION {output.check}
|
| 119 |
+
{ 't :=
|
| 120 |
+
duplicate$ empty$
|
| 121 |
+
{ pop$ "empty " t * " in " * cite$ * warning$ }
|
| 122 |
+
'output.nonnull
|
| 123 |
+
if$
|
| 124 |
+
}
|
| 125 |
+
|
| 126 |
+
FUNCTION {fin.entry}
|
| 127 |
+
{ add.period$
|
| 128 |
+
write$
|
| 129 |
+
newline$
|
| 130 |
+
}
|
| 131 |
+
|
| 132 |
+
FUNCTION {new.block}
|
| 133 |
+
{ output.state before.all =
|
| 134 |
+
'skip$
|
| 135 |
+
{ after.block 'output.state := }
|
| 136 |
+
if$
|
| 137 |
+
}
|
| 138 |
+
|
| 139 |
+
FUNCTION {new.sentence}
|
| 140 |
+
{ output.state after.block =
|
| 141 |
+
'skip$
|
| 142 |
+
{ output.state before.all =
|
| 143 |
+
'skip$
|
| 144 |
+
{ after.sentence 'output.state := }
|
| 145 |
+
if$
|
| 146 |
+
}
|
| 147 |
+
if$
|
| 148 |
+
}
|
| 149 |
+
|
| 150 |
+
FUNCTION {not}
|
| 151 |
+
{ { #0 }
|
| 152 |
+
{ #1 }
|
| 153 |
+
if$
|
| 154 |
+
}
|
| 155 |
+
|
| 156 |
+
FUNCTION {and}
|
| 157 |
+
{ 'skip$
|
| 158 |
+
{ pop$ #0 }
|
| 159 |
+
if$
|
| 160 |
+
}
|
| 161 |
+
|
| 162 |
+
FUNCTION {or}
|
| 163 |
+
{ { pop$ #1 }
|
| 164 |
+
'skip$
|
| 165 |
+
if$
|
| 166 |
+
}
|
| 167 |
+
|
| 168 |
+
FUNCTION {new.block.checka}
|
| 169 |
+
{ empty$
|
| 170 |
+
'skip$
|
| 171 |
+
'new.block
|
| 172 |
+
if$
|
| 173 |
+
}
|
| 174 |
+
|
| 175 |
+
FUNCTION {new.block.checkb}
|
| 176 |
+
{ empty$
|
| 177 |
+
swap$ empty$
|
| 178 |
+
and
|
| 179 |
+
'skip$
|
| 180 |
+
'new.block
|
| 181 |
+
if$
|
| 182 |
+
}
|
| 183 |
+
|
| 184 |
+
FUNCTION {new.sentence.checka}
|
| 185 |
+
{ empty$
|
| 186 |
+
'skip$
|
| 187 |
+
'new.sentence
|
| 188 |
+
if$
|
| 189 |
+
}
|
| 190 |
+
|
| 191 |
+
FUNCTION {new.sentence.checkb}
|
| 192 |
+
{ empty$
|
| 193 |
+
swap$ empty$
|
| 194 |
+
and
|
| 195 |
+
'skip$
|
| 196 |
+
'new.sentence
|
| 197 |
+
if$
|
| 198 |
+
}
|
| 199 |
+
|
| 200 |
+
FUNCTION {field.or.null}
|
| 201 |
+
{ duplicate$ empty$
|
| 202 |
+
{ pop$ "" }
|
| 203 |
+
'skip$
|
| 204 |
+
if$
|
| 205 |
+
}
|
| 206 |
+
|
| 207 |
+
FUNCTION {emphasize}
|
| 208 |
+
{ duplicate$ empty$
|
| 209 |
+
{ pop$ "" }
|
| 210 |
+
{ "\emph{" swap$ * "}" * }
|
| 211 |
+
if$
|
| 212 |
+
}
|
| 213 |
+
|
| 214 |
+
INTEGERS { nameptr namesleft numnames }
|
| 215 |
+
|
| 216 |
+
FUNCTION {format.names}
|
| 217 |
+
{ 's :=
|
| 218 |
+
#1 'nameptr :=
|
| 219 |
+
s num.names$ 'numnames :=
|
| 220 |
+
numnames 'namesleft :=
|
| 221 |
+
{ namesleft #0 > }
|
| 222 |
+
% Formerly { s nameptr "{f.~}{vv~}{ll}{, jj}" format.name$ 't :=
|
| 223 |
+
{ s nameptr "{ff }{vv }{ll}{, jj}" format.name$ 't :=
|
| 224 |
+
nameptr #1 >
|
| 225 |
+
{ namesleft #1 >
|
| 226 |
+
{ ", " * t * }
|
| 227 |
+
{ numnames #2 >
|
| 228 |
+
{ "," * }
|
| 229 |
+
'skip$
|
| 230 |
+
if$
|
| 231 |
+
t "others" =
|
| 232 |
+
{ " et~al." * }
|
| 233 |
+
{ " and " * t * }
|
| 234 |
+
if$
|
| 235 |
+
}
|
| 236 |
+
if$
|
| 237 |
+
}
|
| 238 |
+
't
|
| 239 |
+
if$
|
| 240 |
+
nameptr #1 + 'nameptr :=
|
| 241 |
+
namesleft #1 - 'namesleft :=
|
| 242 |
+
}
|
| 243 |
+
while$
|
| 244 |
+
}
|
| 245 |
+
|
| 246 |
+
FUNCTION {format.key}
|
| 247 |
+
{ empty$
|
| 248 |
+
{ key field.or.null }
|
| 249 |
+
{ "" }
|
| 250 |
+
if$
|
| 251 |
+
}
|
| 252 |
+
|
| 253 |
+
FUNCTION {format.authors}
|
| 254 |
+
{ author empty$
|
| 255 |
+
{ "" }
|
| 256 |
+
{ author format.names }
|
| 257 |
+
if$
|
| 258 |
+
}
|
| 259 |
+
|
| 260 |
+
FUNCTION {format.editors}
|
| 261 |
+
{ editor empty$
|
| 262 |
+
{ "" }
|
| 263 |
+
{ editor format.names
|
| 264 |
+
editor num.names$ #1 >
|
| 265 |
+
{ ", editors" * }
|
| 266 |
+
{ ", editor" * }
|
| 267 |
+
if$
|
| 268 |
+
}
|
| 269 |
+
if$
|
| 270 |
+
}
|
| 271 |
+
|
| 272 |
+
FUNCTION {format.isbn}
|
| 273 |
+
{ isbn empty$
|
| 274 |
+
{ "" }
|
| 275 |
+
% { new.block "ISBN " isbn * }
|
| 276 |
+
{ "" }
|
| 277 |
+
if$
|
| 278 |
+
}
|
| 279 |
+
|
| 280 |
+
FUNCTION {format.issn}
|
| 281 |
+
{ issn empty$
|
| 282 |
+
{ "" }
|
| 283 |
+
% { new.block "ISSN " issn * }
|
| 284 |
+
{ "" }
|
| 285 |
+
if$
|
| 286 |
+
}
|
| 287 |
+
|
| 288 |
+
FUNCTION {format.url}
|
| 289 |
+
{ url empty$
|
| 290 |
+
{ "" }
|
| 291 |
+
% { new.block "URL \url{" url * "}" * }
|
| 292 |
+
{ "" }
|
| 293 |
+
if$
|
| 294 |
+
}
|
| 295 |
+
|
| 296 |
+
FUNCTION {format.doi}
|
| 297 |
+
{ doi empty$
|
| 298 |
+
{ "" }
|
| 299 |
+
% { new.block "\doi{" doi * "}" * }
|
| 300 |
+
{ "" }
|
| 301 |
+
if$
|
| 302 |
+
}
|
| 303 |
+
|
| 304 |
+
FUNCTION {format.title}
|
| 305 |
+
{ title empty$
|
| 306 |
+
{ "" }
|
| 307 |
+
{ title "t" change.case$ }
|
| 308 |
+
if$
|
| 309 |
+
}
|
| 310 |
+
|
| 311 |
+
FUNCTION {format.full.names}
|
| 312 |
+
{'s :=
|
| 313 |
+
#1 'nameptr :=
|
| 314 |
+
s num.names$ 'numnames :=
|
| 315 |
+
numnames 'namesleft :=
|
| 316 |
+
{ namesleft #0 > }
|
| 317 |
+
{ s nameptr
|
| 318 |
+
"{vv~}{ll}" format.name$ 't :=
|
| 319 |
+
nameptr #1 >
|
| 320 |
+
{
|
| 321 |
+
namesleft #1 >
|
| 322 |
+
{ ", " * t * }
|
| 323 |
+
{
|
| 324 |
+
numnames #2 >
|
| 325 |
+
{ "," * }
|
| 326 |
+
'skip$
|
| 327 |
+
if$
|
| 328 |
+
t "others" =
|
| 329 |
+
{ " et~al." * }
|
| 330 |
+
{ " and " * t * }
|
| 331 |
+
if$
|
| 332 |
+
}
|
| 333 |
+
if$
|
| 334 |
+
}
|
| 335 |
+
't
|
| 336 |
+
if$
|
| 337 |
+
nameptr #1 + 'nameptr :=
|
| 338 |
+
namesleft #1 - 'namesleft :=
|
| 339 |
+
}
|
| 340 |
+
while$
|
| 341 |
+
}
|
| 342 |
+
|
| 343 |
+
FUNCTION {author.editor.full}
|
| 344 |
+
{ author empty$
|
| 345 |
+
{ editor empty$
|
| 346 |
+
{ "" }
|
| 347 |
+
{ editor format.full.names }
|
| 348 |
+
if$
|
| 349 |
+
}
|
| 350 |
+
{ author format.full.names }
|
| 351 |
+
if$
|
| 352 |
+
}
|
| 353 |
+
|
| 354 |
+
FUNCTION {author.full}
|
| 355 |
+
{ author empty$
|
| 356 |
+
{ "" }
|
| 357 |
+
{ author format.full.names }
|
| 358 |
+
if$
|
| 359 |
+
}
|
| 360 |
+
|
| 361 |
+
FUNCTION {editor.full}
|
| 362 |
+
{ editor empty$
|
| 363 |
+
{ "" }
|
| 364 |
+
{ editor format.full.names }
|
| 365 |
+
if$
|
| 366 |
+
}
|
| 367 |
+
|
| 368 |
+
FUNCTION {make.full.names}
|
| 369 |
+
{ type$ "book" =
|
| 370 |
+
type$ "inbook" =
|
| 371 |
+
or
|
| 372 |
+
'author.editor.full
|
| 373 |
+
{ type$ "proceedings" =
|
| 374 |
+
'editor.full
|
| 375 |
+
'author.full
|
| 376 |
+
if$
|
| 377 |
+
}
|
| 378 |
+
if$
|
| 379 |
+
}
|
| 380 |
+
|
| 381 |
+
FUNCTION {output.bibitem}
|
| 382 |
+
{ newline$
|
| 383 |
+
"\bibitem[" write$
|
| 384 |
+
label write$
|
| 385 |
+
")" make.full.names duplicate$ short.list =
|
| 386 |
+
{ pop$ }
|
| 387 |
+
{ * }
|
| 388 |
+
if$
|
| 389 |
+
"]{" * write$
|
| 390 |
+
cite$ write$
|
| 391 |
+
"}" write$
|
| 392 |
+
newline$
|
| 393 |
+
""
|
| 394 |
+
before.all 'output.state :=
|
| 395 |
+
}
|
| 396 |
+
|
| 397 |
+
FUNCTION {n.dashify}
|
| 398 |
+
{ 't :=
|
| 399 |
+
""
|
| 400 |
+
{ t empty$ not }
|
| 401 |
+
{ t #1 #1 substring$ "-" =
|
| 402 |
+
{ t #1 #2 substring$ "--" = not
|
| 403 |
+
{ "--" *
|
| 404 |
+
t #2 global.max$ substring$ 't :=
|
| 405 |
+
}
|
| 406 |
+
{ { t #1 #1 substring$ "-" = }
|
| 407 |
+
{ "-" *
|
| 408 |
+
t #2 global.max$ substring$ 't :=
|
| 409 |
+
}
|
| 410 |
+
while$
|
| 411 |
+
}
|
| 412 |
+
if$
|
| 413 |
+
}
|
| 414 |
+
{ t #1 #1 substring$ *
|
| 415 |
+
t #2 global.max$ substring$ 't :=
|
| 416 |
+
}
|
| 417 |
+
if$
|
| 418 |
+
}
|
| 419 |
+
while$
|
| 420 |
+
}
|
| 421 |
+
|
| 422 |
+
FUNCTION {format.date}
|
| 423 |
+
{ year duplicate$ empty$
|
| 424 |
+
{ "empty year in " cite$ * warning$
|
| 425 |
+
pop$ "" }
|
| 426 |
+
'skip$
|
| 427 |
+
if$
|
| 428 |
+
%% CR: Leave out months.
|
| 429 |
+
% month empty$
|
| 430 |
+
% 'skip$
|
| 431 |
+
% { month
|
| 432 |
+
% " " * swap$ *
|
| 433 |
+
% }
|
| 434 |
+
% if$
|
| 435 |
+
extra.label *
|
| 436 |
+
}
|
| 437 |
+
|
| 438 |
+
FUNCTION {format.btitle}
|
| 439 |
+
{ title emphasize
|
| 440 |
+
}
|
| 441 |
+
|
| 442 |
+
FUNCTION {tie.or.space.connect}
|
| 443 |
+
{ duplicate$ text.length$ #3 <
|
| 444 |
+
{ "~" }
|
| 445 |
+
{ " " }
|
| 446 |
+
if$
|
| 447 |
+
swap$ * *
|
| 448 |
+
}
|
| 449 |
+
|
| 450 |
+
FUNCTION {either.or.check}
|
| 451 |
+
{ empty$
|
| 452 |
+
'pop$
|
| 453 |
+
{ "can't use both " swap$ * " fields in " * cite$ * warning$ }
|
| 454 |
+
if$
|
| 455 |
+
}
|
| 456 |
+
|
| 457 |
+
FUNCTION {format.bvolume}
|
| 458 |
+
{ volume empty$
|
| 459 |
+
{ "" }
|
| 460 |
+
%% CR: Don't show "volume 1234 of LNCS" etc.
|
| 461 |
+
% { "volume" volume tie.or.space.connect
|
| 462 |
+
% series empty$
|
| 463 |
+
% 'skip$
|
| 464 |
+
% { " of " * series emphasize * }
|
| 465 |
+
% if$
|
| 466 |
+
% "volume and number" number either.or.check
|
| 467 |
+
% }
|
| 468 |
+
{ "" }
|
| 469 |
+
if$
|
| 470 |
+
}
|
| 471 |
+
|
| 472 |
+
FUNCTION {format.number.series}
|
| 473 |
+
{ volume empty$
|
| 474 |
+
{ number empty$
|
| 475 |
+
%% CR: Leave out series information.
|
| 476 |
+
% { series field.or.null }
|
| 477 |
+
{ "" }
|
| 478 |
+
{ output.state mid.sentence =
|
| 479 |
+
{ "number" }
|
| 480 |
+
{ "Number" }
|
| 481 |
+
if$
|
| 482 |
+
number tie.or.space.connect
|
| 483 |
+
series empty$
|
| 484 |
+
{ "there's a number but no series in " cite$ * warning$ }
|
| 485 |
+
{ " in " * series * }
|
| 486 |
+
if$
|
| 487 |
+
}
|
| 488 |
+
if$
|
| 489 |
+
}
|
| 490 |
+
{ "" }
|
| 491 |
+
if$
|
| 492 |
+
}
|
| 493 |
+
|
| 494 |
+
FUNCTION {format.edition}
|
| 495 |
+
{ edition empty$
|
| 496 |
+
{ "" }
|
| 497 |
+
{ output.state mid.sentence =
|
| 498 |
+
{ edition "l" change.case$ " edition" * }
|
| 499 |
+
{ edition "t" change.case$ " edition" * }
|
| 500 |
+
if$
|
| 501 |
+
}
|
| 502 |
+
if$
|
| 503 |
+
}
|
| 504 |
+
|
| 505 |
+
INTEGERS { multiresult }
|
| 506 |
+
|
| 507 |
+
FUNCTION {multi.page.check}
|
| 508 |
+
{ 't :=
|
| 509 |
+
#0 'multiresult :=
|
| 510 |
+
{ multiresult not
|
| 511 |
+
t empty$ not
|
| 512 |
+
and
|
| 513 |
+
}
|
| 514 |
+
{ t #1 #1 substring$
|
| 515 |
+
duplicate$ "-" =
|
| 516 |
+
swap$ duplicate$ "," =
|
| 517 |
+
swap$ "+" =
|
| 518 |
+
or or
|
| 519 |
+
{ #1 'multiresult := }
|
| 520 |
+
{ t #2 global.max$ substring$ 't := }
|
| 521 |
+
if$
|
| 522 |
+
}
|
| 523 |
+
while$
|
| 524 |
+
multiresult
|
| 525 |
+
}
|
| 526 |
+
|
| 527 |
+
FUNCTION {format.pages}
|
| 528 |
+
{ pages empty$
|
| 529 |
+
{ "" }
|
| 530 |
+
{ pages multi.page.check
|
| 531 |
+
{ "pages" pages n.dashify tie.or.space.connect }
|
| 532 |
+
{ "page" pages tie.or.space.connect }
|
| 533 |
+
if$
|
| 534 |
+
}
|
| 535 |
+
if$
|
| 536 |
+
}
|
| 537 |
+
|
| 538 |
+
FUNCTION {format.eid}
|
| 539 |
+
{ eid empty$
|
| 540 |
+
{ "" }
|
| 541 |
+
{ "art." eid tie.or.space.connect }
|
| 542 |
+
if$
|
| 543 |
+
}
|
| 544 |
+
|
| 545 |
+
FUNCTION {format.vol.num.pages}
|
| 546 |
+
{ volume field.or.null
|
| 547 |
+
number empty$
|
| 548 |
+
'skip$
|
| 549 |
+
{ "\penalty0 (" number * ")" * *
|
| 550 |
+
volume empty$
|
| 551 |
+
{ "there's a number but no volume in " cite$ * warning$ }
|
| 552 |
+
'skip$
|
| 553 |
+
if$
|
| 554 |
+
}
|
| 555 |
+
if$
|
| 556 |
+
pages empty$
|
| 557 |
+
'skip$
|
| 558 |
+
{ duplicate$ empty$
|
| 559 |
+
{ pop$ format.pages }
|
| 560 |
+
{ ":\penalty0 " * pages n.dashify * }
|
| 561 |
+
if$
|
| 562 |
+
}
|
| 563 |
+
if$
|
| 564 |
+
}
|
| 565 |
+
|
| 566 |
+
FUNCTION {format.vol.num.eid}
|
| 567 |
+
{ volume field.or.null
|
| 568 |
+
number empty$
|
| 569 |
+
'skip$
|
| 570 |
+
{ "\penalty0 (" number * ")" * *
|
| 571 |
+
volume empty$
|
| 572 |
+
{ "there's a number but no volume in " cite$ * warning$ }
|
| 573 |
+
'skip$
|
| 574 |
+
if$
|
| 575 |
+
}
|
| 576 |
+
if$
|
| 577 |
+
eid empty$
|
| 578 |
+
'skip$
|
| 579 |
+
{ duplicate$ empty$
|
| 580 |
+
{ pop$ format.eid }
|
| 581 |
+
{ ":\penalty0 " * eid * }
|
| 582 |
+
if$
|
| 583 |
+
}
|
| 584 |
+
if$
|
| 585 |
+
}
|
| 586 |
+
|
| 587 |
+
FUNCTION {format.chapter.pages}
|
| 588 |
+
{ chapter empty$
|
| 589 |
+
'format.pages
|
| 590 |
+
{ type empty$
|
| 591 |
+
{ "chapter" }
|
| 592 |
+
{ type "l" change.case$ }
|
| 593 |
+
if$
|
| 594 |
+
chapter tie.or.space.connect
|
| 595 |
+
pages empty$
|
| 596 |
+
'skip$
|
| 597 |
+
{ ", " * format.pages * }
|
| 598 |
+
if$
|
| 599 |
+
}
|
| 600 |
+
if$
|
| 601 |
+
}
|
| 602 |
+
|
| 603 |
+
FUNCTION {format.in.ed.booktitle}
|
| 604 |
+
{ booktitle empty$
|
| 605 |
+
{ "" }
|
| 606 |
+
%% CR: Leave out editors even if the information is available.
|
| 607 |
+
% { editor empty$
|
| 608 |
+
% { "In " booktitle emphasize * }
|
| 609 |
+
% { "In " format.editors * ", " * booktitle emphasize * }
|
| 610 |
+
% if$
|
| 611 |
+
% }
|
| 612 |
+
{ "In " booktitle emphasize * }
|
| 613 |
+
if$
|
| 614 |
+
}
|
| 615 |
+
|
| 616 |
+
FUNCTION {empty.misc.check}
|
| 617 |
+
{ author empty$ title empty$ howpublished empty$
|
| 618 |
+
month empty$ year empty$ note empty$
|
| 619 |
+
and and and and and
|
| 620 |
+
key empty$ not and
|
| 621 |
+
{ "all relevant fields are empty in " cite$ * warning$ }
|
| 622 |
+
'skip$
|
| 623 |
+
if$
|
| 624 |
+
}
|
| 625 |
+
|
| 626 |
+
FUNCTION {format.thesis.type}
|
| 627 |
+
{ type empty$
|
| 628 |
+
'skip$
|
| 629 |
+
{ pop$
|
| 630 |
+
type "t" change.case$
|
| 631 |
+
}
|
| 632 |
+
if$
|
| 633 |
+
}
|
| 634 |
+
|
| 635 |
+
FUNCTION {format.tr.number}
|
| 636 |
+
{ type empty$
|
| 637 |
+
{ "Technical Report" }
|
| 638 |
+
'type
|
| 639 |
+
if$
|
| 640 |
+
number empty$
|
| 641 |
+
{ "t" change.case$ }
|
| 642 |
+
{ number tie.or.space.connect }
|
| 643 |
+
if$
|
| 644 |
+
}
|
| 645 |
+
|
| 646 |
+
FUNCTION {format.article.crossref}
|
| 647 |
+
{ key empty$
|
| 648 |
+
{ journal empty$
|
| 649 |
+
{ "need key or journal for " cite$ * " to crossref " * crossref *
|
| 650 |
+
warning$
|
| 651 |
+
""
|
| 652 |
+
}
|
| 653 |
+
{ "In \emph{" journal * "}" * }
|
| 654 |
+
if$
|
| 655 |
+
}
|
| 656 |
+
{ "In " }
|
| 657 |
+
if$
|
| 658 |
+
" \citet{" * crossref * "}" *
|
| 659 |
+
}
|
| 660 |
+
|
| 661 |
+
FUNCTION {format.book.crossref}
|
| 662 |
+
{ volume empty$
|
| 663 |
+
{ "empty volume in " cite$ * "'s crossref of " * crossref * warning$
|
| 664 |
+
"In "
|
| 665 |
+
}
|
| 666 |
+
{ "Volume" volume tie.or.space.connect
|
| 667 |
+
" of " *
|
| 668 |
+
}
|
| 669 |
+
if$
|
| 670 |
+
editor empty$
|
| 671 |
+
editor field.or.null author field.or.null =
|
| 672 |
+
or
|
| 673 |
+
{ key empty$
|
| 674 |
+
{ series empty$
|
| 675 |
+
{ "need editor, key, or series for " cite$ * " to crossref " *
|
| 676 |
+
crossref * warning$
|
| 677 |
+
"" *
|
| 678 |
+
}
|
| 679 |
+
{ "\emph{" * series * "}" * }
|
| 680 |
+
if$
|
| 681 |
+
}
|
| 682 |
+
'skip$
|
| 683 |
+
if$
|
| 684 |
+
}
|
| 685 |
+
'skip$
|
| 686 |
+
if$
|
| 687 |
+
" \citet{" * crossref * "}" *
|
| 688 |
+
}
|
| 689 |
+
|
| 690 |
+
FUNCTION {format.incoll.inproc.crossref}
|
| 691 |
+
{ editor empty$
|
| 692 |
+
editor field.or.null author field.or.null =
|
| 693 |
+
or
|
| 694 |
+
{ key empty$
|
| 695 |
+
{ booktitle empty$
|
| 696 |
+
{ "need editor, key, or booktitle for " cite$ * " to crossref " *
|
| 697 |
+
crossref * warning$
|
| 698 |
+
""
|
| 699 |
+
}
|
| 700 |
+
{ "In \emph{" booktitle * "}" * }
|
| 701 |
+
if$
|
| 702 |
+
}
|
| 703 |
+
{ "In " }
|
| 704 |
+
if$
|
| 705 |
+
}
|
| 706 |
+
{ "In " }
|
| 707 |
+
if$
|
| 708 |
+
" \citet{" * crossref * "}" *
|
| 709 |
+
}
|
| 710 |
+
|
| 711 |
+
FUNCTION {article}
|
| 712 |
+
{ output.bibitem
|
| 713 |
+
format.authors "author" output.check
|
| 714 |
+
author format.key output
|
| 715 |
+
new.block
|
| 716 |
+
format.title "title" output.check
|
| 717 |
+
new.block
|
| 718 |
+
crossref missing$
|
| 719 |
+
{ journal emphasize "journal" output.check
|
| 720 |
+
eid empty$
|
| 721 |
+
{ format.vol.num.pages output }
|
| 722 |
+
{ format.vol.num.eid output }
|
| 723 |
+
if$
|
| 724 |
+
format.date "year" output.check
|
| 725 |
+
}
|
| 726 |
+
{ format.article.crossref output.nonnull
|
| 727 |
+
eid empty$
|
| 728 |
+
{ format.pages output }
|
| 729 |
+
{ format.eid output }
|
| 730 |
+
if$
|
| 731 |
+
}
|
| 732 |
+
if$
|
| 733 |
+
format.issn output
|
| 734 |
+
format.doi output
|
| 735 |
+
format.url output
|
| 736 |
+
new.block
|
| 737 |
+
note output
|
| 738 |
+
fin.entry
|
| 739 |
+
}
|
| 740 |
+
|
| 741 |
+
FUNCTION {book}
|
| 742 |
+
{ output.bibitem
|
| 743 |
+
author empty$
|
| 744 |
+
{ format.editors "author and editor" output.check
|
| 745 |
+
editor format.key output
|
| 746 |
+
}
|
| 747 |
+
{ format.authors output.nonnull
|
| 748 |
+
crossref missing$
|
| 749 |
+
{ "author and editor" editor either.or.check }
|
| 750 |
+
'skip$
|
| 751 |
+
if$
|
| 752 |
+
}
|
| 753 |
+
if$
|
| 754 |
+
new.block
|
| 755 |
+
format.btitle "title" output.check
|
| 756 |
+
crossref missing$
|
| 757 |
+
{ format.bvolume output
|
| 758 |
+
new.block
|
| 759 |
+
format.number.series output
|
| 760 |
+
new.sentence
|
| 761 |
+
publisher "publisher" output.check
|
| 762 |
+
address output
|
| 763 |
+
}
|
| 764 |
+
{ new.block
|
| 765 |
+
format.book.crossref output.nonnull
|
| 766 |
+
}
|
| 767 |
+
if$
|
| 768 |
+
format.edition output
|
| 769 |
+
format.date "year" output.check
|
| 770 |
+
format.isbn output
|
| 771 |
+
format.doi output
|
| 772 |
+
format.url output
|
| 773 |
+
new.block
|
| 774 |
+
note output
|
| 775 |
+
fin.entry
|
| 776 |
+
}
|
| 777 |
+
|
| 778 |
+
FUNCTION {booklet}
|
| 779 |
+
{ output.bibitem
|
| 780 |
+
format.authors output
|
| 781 |
+
author format.key output
|
| 782 |
+
new.block
|
| 783 |
+
format.title "title" output.check
|
| 784 |
+
howpublished address new.block.checkb
|
| 785 |
+
howpublished output
|
| 786 |
+
address output
|
| 787 |
+
format.date output
|
| 788 |
+
format.isbn output
|
| 789 |
+
format.doi output
|
| 790 |
+
format.url output
|
| 791 |
+
new.block
|
| 792 |
+
note output
|
| 793 |
+
fin.entry
|
| 794 |
+
}
|
| 795 |
+
|
| 796 |
+
FUNCTION {inbook}
|
| 797 |
+
{ output.bibitem
|
| 798 |
+
author empty$
|
| 799 |
+
{ format.editors "author and editor" output.check
|
| 800 |
+
editor format.key output
|
| 801 |
+
}
|
| 802 |
+
{ format.authors output.nonnull
|
| 803 |
+
crossref missing$
|
| 804 |
+
{ "author and editor" editor either.or.check }
|
| 805 |
+
'skip$
|
| 806 |
+
if$
|
| 807 |
+
}
|
| 808 |
+
if$
|
| 809 |
+
new.block
|
| 810 |
+
format.btitle "title" output.check
|
| 811 |
+
crossref missing$
|
| 812 |
+
{ format.bvolume output
|
| 813 |
+
format.chapter.pages "chapter and pages" output.check
|
| 814 |
+
new.block
|
| 815 |
+
format.number.series output
|
| 816 |
+
new.sentence
|
| 817 |
+
publisher "publisher" output.check
|
| 818 |
+
address output
|
| 819 |
+
}
|
| 820 |
+
{ format.chapter.pages "chapter and pages" output.check
|
| 821 |
+
new.block
|
| 822 |
+
format.book.crossref output.nonnull
|
| 823 |
+
}
|
| 824 |
+
if$
|
| 825 |
+
format.edition output
|
| 826 |
+
format.date "year" output.check
|
| 827 |
+
format.isbn output
|
| 828 |
+
format.doi output
|
| 829 |
+
format.url output
|
| 830 |
+
new.block
|
| 831 |
+
note output
|
| 832 |
+
fin.entry
|
| 833 |
+
}
|
| 834 |
+
|
| 835 |
+
FUNCTION {incollection}
|
| 836 |
+
{ output.bibitem
|
| 837 |
+
format.authors "author" output.check
|
| 838 |
+
author format.key output
|
| 839 |
+
new.block
|
| 840 |
+
format.title "title" output.check
|
| 841 |
+
new.block
|
| 842 |
+
crossref missing$
|
| 843 |
+
{ format.in.ed.booktitle "booktitle" output.check
|
| 844 |
+
format.bvolume output
|
| 845 |
+
format.number.series output
|
| 846 |
+
format.chapter.pages output
|
| 847 |
+
new.sentence
|
| 848 |
+
publisher "publisher" output.check
|
| 849 |
+
address output
|
| 850 |
+
format.edition output
|
| 851 |
+
format.date "year" output.check
|
| 852 |
+
}
|
| 853 |
+
{ format.incoll.inproc.crossref output.nonnull
|
| 854 |
+
format.chapter.pages output
|
| 855 |
+
}
|
| 856 |
+
if$
|
| 857 |
+
format.isbn output
|
| 858 |
+
format.doi output
|
| 859 |
+
format.url output
|
| 860 |
+
new.block
|
| 861 |
+
note output
|
| 862 |
+
fin.entry
|
| 863 |
+
}
|
| 864 |
+
|
| 865 |
+
FUNCTION {inproceedings}
|
| 866 |
+
{ output.bibitem
|
| 867 |
+
format.authors "author" output.check
|
| 868 |
+
author format.key output
|
| 869 |
+
new.block
|
| 870 |
+
format.title "title" output.check
|
| 871 |
+
new.block
|
| 872 |
+
crossref missing$
|
| 873 |
+
{ format.in.ed.booktitle "booktitle" output.check
|
| 874 |
+
format.bvolume output
|
| 875 |
+
format.number.series output
|
| 876 |
+
format.pages output
|
| 877 |
+
address empty$
|
| 878 |
+
{ organization publisher new.sentence.checkb
|
| 879 |
+
organization output
|
| 880 |
+
publisher output
|
| 881 |
+
format.date "year" output.check
|
| 882 |
+
}
|
| 883 |
+
{ address output.nonnull
|
| 884 |
+
format.date "year" output.check
|
| 885 |
+
new.sentence
|
| 886 |
+
organization output
|
| 887 |
+
publisher output
|
| 888 |
+
}
|
| 889 |
+
if$
|
| 890 |
+
}
|
| 891 |
+
{ format.incoll.inproc.crossref output.nonnull
|
| 892 |
+
format.pages output
|
| 893 |
+
}
|
| 894 |
+
if$
|
| 895 |
+
format.isbn output
|
| 896 |
+
format.doi output
|
| 897 |
+
format.url output
|
| 898 |
+
new.block
|
| 899 |
+
note output
|
| 900 |
+
fin.entry
|
| 901 |
+
}
|
| 902 |
+
|
| 903 |
+
FUNCTION {conference} { inproceedings }
|
| 904 |
+
|
| 905 |
+
FUNCTION {manual}
|
| 906 |
+
{ output.bibitem
|
| 907 |
+
format.authors output
|
| 908 |
+
author format.key output
|
| 909 |
+
new.block
|
| 910 |
+
format.btitle "title" output.check
|
| 911 |
+
organization address new.block.checkb
|
| 912 |
+
organization output
|
| 913 |
+
address output
|
| 914 |
+
format.edition output
|
| 915 |
+
format.date output
|
| 916 |
+
format.url output
|
| 917 |
+
new.block
|
| 918 |
+
note output
|
| 919 |
+
fin.entry
|
| 920 |
+
}
|
| 921 |
+
|
| 922 |
+
FUNCTION {mastersthesis}
|
| 923 |
+
{ output.bibitem
|
| 924 |
+
format.authors "author" output.check
|
| 925 |
+
author format.key output
|
| 926 |
+
new.block
|
| 927 |
+
format.title "title" output.check
|
| 928 |
+
new.block
|
| 929 |
+
"Master's thesis" format.thesis.type output.nonnull
|
| 930 |
+
school "school" output.check
|
| 931 |
+
address output
|
| 932 |
+
format.date "year" output.check
|
| 933 |
+
format.url output
|
| 934 |
+
new.block
|
| 935 |
+
note output
|
| 936 |
+
fin.entry
|
| 937 |
+
}
|
| 938 |
+
|
| 939 |
+
FUNCTION {misc}
|
| 940 |
+
{ output.bibitem
|
| 941 |
+
format.authors output
|
| 942 |
+
author format.key output
|
| 943 |
+
title howpublished new.block.checkb
|
| 944 |
+
format.title output
|
| 945 |
+
howpublished new.block.checka
|
| 946 |
+
howpublished output
|
| 947 |
+
format.date output
|
| 948 |
+
format.issn output
|
| 949 |
+
format.url output
|
| 950 |
+
new.block
|
| 951 |
+
note output
|
| 952 |
+
fin.entry
|
| 953 |
+
empty.misc.check
|
| 954 |
+
}
|
| 955 |
+
|
| 956 |
+
FUNCTION {phdthesis}
|
| 957 |
+
{ output.bibitem
|
| 958 |
+
format.authors "author" output.check
|
| 959 |
+
author format.key output
|
| 960 |
+
new.block
|
| 961 |
+
format.btitle "title" output.check
|
| 962 |
+
new.block
|
| 963 |
+
"PhD thesis" format.thesis.type output.nonnull
|
| 964 |
+
school "school" output.check
|
| 965 |
+
address output
|
| 966 |
+
format.date "year" output.check
|
| 967 |
+
format.url output
|
| 968 |
+
new.block
|
| 969 |
+
note output
|
| 970 |
+
fin.entry
|
| 971 |
+
}
|
| 972 |
+
|
| 973 |
+
FUNCTION {proceedings}
|
| 974 |
+
{ output.bibitem
|
| 975 |
+
format.editors output
|
| 976 |
+
editor format.key output
|
| 977 |
+
new.block
|
| 978 |
+
format.btitle "title" output.check
|
| 979 |
+
format.bvolume output
|
| 980 |
+
format.number.series output
|
| 981 |
+
address output
|
| 982 |
+
format.date "year" output.check
|
| 983 |
+
new.sentence
|
| 984 |
+
organization output
|
| 985 |
+
publisher output
|
| 986 |
+
format.isbn output
|
| 987 |
+
format.doi output
|
| 988 |
+
format.url output
|
| 989 |
+
new.block
|
| 990 |
+
note output
|
| 991 |
+
fin.entry
|
| 992 |
+
}
|
| 993 |
+
|
| 994 |
+
FUNCTION {techreport}
|
| 995 |
+
{ output.bibitem
|
| 996 |
+
format.authors "author" output.check
|
| 997 |
+
author format.key output
|
| 998 |
+
new.block
|
| 999 |
+
format.title "title" output.check
|
| 1000 |
+
new.block
|
| 1001 |
+
format.tr.number output.nonnull
|
| 1002 |
+
institution "institution" output.check
|
| 1003 |
+
address output
|
| 1004 |
+
format.date "year" output.check
|
| 1005 |
+
format.url output
|
| 1006 |
+
new.block
|
| 1007 |
+
note output
|
| 1008 |
+
fin.entry
|
| 1009 |
+
}
|
| 1010 |
+
|
| 1011 |
+
FUNCTION {unpublished}
|
| 1012 |
+
{ output.bibitem
|
| 1013 |
+
format.authors "author" output.check
|
| 1014 |
+
author format.key output
|
| 1015 |
+
new.block
|
| 1016 |
+
format.title "title" output.check
|
| 1017 |
+
new.block
|
| 1018 |
+
note "note" output.check
|
| 1019 |
+
format.date output
|
| 1020 |
+
format.url output
|
| 1021 |
+
fin.entry
|
| 1022 |
+
}
|
| 1023 |
+
|
| 1024 |
+
FUNCTION {default.type} { misc }
|
| 1025 |
+
|
| 1026 |
+
|
| 1027 |
+
MACRO {jan} {"Jan."}
|
| 1028 |
+
|
| 1029 |
+
MACRO {feb} {"Feb."}
|
| 1030 |
+
|
| 1031 |
+
MACRO {mar} {"Mar."}
|
| 1032 |
+
|
| 1033 |
+
MACRO {apr} {"Apr."}
|
| 1034 |
+
|
| 1035 |
+
MACRO {may} {"May"}
|
| 1036 |
+
|
| 1037 |
+
MACRO {jun} {"June"}
|
| 1038 |
+
|
| 1039 |
+
MACRO {jul} {"July"}
|
| 1040 |
+
|
| 1041 |
+
MACRO {aug} {"Aug."}
|
| 1042 |
+
|
| 1043 |
+
MACRO {sep} {"Sept."}
|
| 1044 |
+
|
| 1045 |
+
MACRO {oct} {"Oct."}
|
| 1046 |
+
|
| 1047 |
+
MACRO {nov} {"Nov."}
|
| 1048 |
+
|
| 1049 |
+
MACRO {dec} {"Dec."}
|
| 1050 |
+
|
| 1051 |
+
|
| 1052 |
+
|
| 1053 |
+
MACRO {acmcs} {"ACM Comput. Surv."}
|
| 1054 |
+
|
| 1055 |
+
MACRO {acta} {"Acta Inf."}
|
| 1056 |
+
|
| 1057 |
+
MACRO {cacm} {"Commun. ACM"}
|
| 1058 |
+
|
| 1059 |
+
MACRO {ibmjrd} {"IBM J. Res. Dev."}
|
| 1060 |
+
|
| 1061 |
+
MACRO {ibmsj} {"IBM Syst.~J."}
|
| 1062 |
+
|
| 1063 |
+
MACRO {ieeese} {"IEEE Trans. Softw. Eng."}
|
| 1064 |
+
|
| 1065 |
+
MACRO {ieeetc} {"IEEE Trans. Comput."}
|
| 1066 |
+
|
| 1067 |
+
MACRO {ieeetcad}
|
| 1068 |
+
{"IEEE Trans. Comput.-Aided Design Integrated Circuits"}
|
| 1069 |
+
|
| 1070 |
+
MACRO {ipl} {"Inf. Process. Lett."}
|
| 1071 |
+
|
| 1072 |
+
MACRO {jacm} {"J.~ACM"}
|
| 1073 |
+
|
| 1074 |
+
MACRO {jcss} {"J.~Comput. Syst. Sci."}
|
| 1075 |
+
|
| 1076 |
+
MACRO {scp} {"Sci. Comput. Programming"}
|
| 1077 |
+
|
| 1078 |
+
MACRO {sicomp} {"SIAM J. Comput."}
|
| 1079 |
+
|
| 1080 |
+
MACRO {tocs} {"ACM Trans. Comput. Syst."}
|
| 1081 |
+
|
| 1082 |
+
MACRO {tods} {"ACM Trans. Database Syst."}
|
| 1083 |
+
|
| 1084 |
+
MACRO {tog} {"ACM Trans. Gr."}
|
| 1085 |
+
|
| 1086 |
+
MACRO {toms} {"ACM Trans. Math. Softw."}
|
| 1087 |
+
|
| 1088 |
+
MACRO {toois} {"ACM Trans. Office Inf. Syst."}
|
| 1089 |
+
|
| 1090 |
+
MACRO {toplas} {"ACM Trans. Prog. Lang. Syst."}
|
| 1091 |
+
|
| 1092 |
+
MACRO {tcs} {"Theoretical Comput. Sci."}
|
| 1093 |
+
|
| 1094 |
+
|
| 1095 |
+
READ
|
| 1096 |
+
|
| 1097 |
+
FUNCTION {sortify}
|
| 1098 |
+
{ purify$
|
| 1099 |
+
"l" change.case$
|
| 1100 |
+
}
|
| 1101 |
+
|
| 1102 |
+
INTEGERS { len }
|
| 1103 |
+
|
| 1104 |
+
FUNCTION {chop.word}
|
| 1105 |
+
{ 's :=
|
| 1106 |
+
'len :=
|
| 1107 |
+
s #1 len substring$ =
|
| 1108 |
+
{ s len #1 + global.max$ substring$ }
|
| 1109 |
+
's
|
| 1110 |
+
if$
|
| 1111 |
+
}
|
| 1112 |
+
|
| 1113 |
+
FUNCTION {format.lab.names}
|
| 1114 |
+
{ 's :=
|
| 1115 |
+
s #1 "{vv~}{ll}" format.name$
|
| 1116 |
+
s num.names$ duplicate$
|
| 1117 |
+
#2 >
|
| 1118 |
+
{ pop$ " et~al." * }
|
| 1119 |
+
{ #2 <
|
| 1120 |
+
'skip$
|
| 1121 |
+
{ s #2 "{ff }{vv }{ll}{ jj}" format.name$ "others" =
|
| 1122 |
+
{ " et~al." * }
|
| 1123 |
+
{ " and " * s #2 "{vv~}{ll}" format.name$ * }
|
| 1124 |
+
if$
|
| 1125 |
+
}
|
| 1126 |
+
if$
|
| 1127 |
+
}
|
| 1128 |
+
if$
|
| 1129 |
+
}
|
| 1130 |
+
|
| 1131 |
+
FUNCTION {author.key.label}
|
| 1132 |
+
{ author empty$
|
| 1133 |
+
{ key empty$
|
| 1134 |
+
{ cite$ #1 #3 substring$ }
|
| 1135 |
+
'key
|
| 1136 |
+
if$
|
| 1137 |
+
}
|
| 1138 |
+
{ author format.lab.names }
|
| 1139 |
+
if$
|
| 1140 |
+
}
|
| 1141 |
+
|
| 1142 |
+
FUNCTION {author.editor.key.label}
|
| 1143 |
+
{ author empty$
|
| 1144 |
+
{ editor empty$
|
| 1145 |
+
{ key empty$
|
| 1146 |
+
{ cite$ #1 #3 substring$ }
|
| 1147 |
+
'key
|
| 1148 |
+
if$
|
| 1149 |
+
}
|
| 1150 |
+
{ editor format.lab.names }
|
| 1151 |
+
if$
|
| 1152 |
+
}
|
| 1153 |
+
{ author format.lab.names }
|
| 1154 |
+
if$
|
| 1155 |
+
}
|
| 1156 |
+
|
| 1157 |
+
FUNCTION {author.key.organization.label}
|
| 1158 |
+
{ author empty$
|
| 1159 |
+
{ key empty$
|
| 1160 |
+
{ organization empty$
|
| 1161 |
+
{ cite$ #1 #3 substring$ }
|
| 1162 |
+
{ "The " #4 organization chop.word #3 text.prefix$ }
|
| 1163 |
+
if$
|
| 1164 |
+
}
|
| 1165 |
+
'key
|
| 1166 |
+
if$
|
| 1167 |
+
}
|
| 1168 |
+
{ author format.lab.names }
|
| 1169 |
+
if$
|
| 1170 |
+
}
|
| 1171 |
+
|
| 1172 |
+
FUNCTION {editor.key.organization.label}
|
| 1173 |
+
{ editor empty$
|
| 1174 |
+
{ key empty$
|
| 1175 |
+
{ organization empty$
|
| 1176 |
+
{ cite$ #1 #3 substring$ }
|
| 1177 |
+
{ "The " #4 organization chop.word #3 text.prefix$ }
|
| 1178 |
+
if$
|
| 1179 |
+
}
|
| 1180 |
+
'key
|
| 1181 |
+
if$
|
| 1182 |
+
}
|
| 1183 |
+
{ editor format.lab.names }
|
| 1184 |
+
if$
|
| 1185 |
+
}
|
| 1186 |
+
|
| 1187 |
+
FUNCTION {calc.short.authors}
|
| 1188 |
+
{ type$ "book" =
|
| 1189 |
+
type$ "inbook" =
|
| 1190 |
+
or
|
| 1191 |
+
'author.editor.key.label
|
| 1192 |
+
{ type$ "proceedings" =
|
| 1193 |
+
'editor.key.organization.label
|
| 1194 |
+
{ type$ "manual" =
|
| 1195 |
+
'author.key.organization.label
|
| 1196 |
+
'author.key.label
|
| 1197 |
+
if$
|
| 1198 |
+
}
|
| 1199 |
+
if$
|
| 1200 |
+
}
|
| 1201 |
+
if$
|
| 1202 |
+
'short.list :=
|
| 1203 |
+
}
|
| 1204 |
+
|
| 1205 |
+
FUNCTION {calc.label}
|
| 1206 |
+
{ calc.short.authors
|
| 1207 |
+
short.list
|
| 1208 |
+
"("
|
| 1209 |
+
*
|
| 1210 |
+
year duplicate$ empty$
|
| 1211 |
+
short.list key field.or.null = or
|
| 1212 |
+
{ pop$ "" }
|
| 1213 |
+
'skip$
|
| 1214 |
+
if$
|
| 1215 |
+
*
|
| 1216 |
+
'label :=
|
| 1217 |
+
}
|
| 1218 |
+
|
| 1219 |
+
FUNCTION {sort.format.names}
|
| 1220 |
+
{ 's :=
|
| 1221 |
+
#1 'nameptr :=
|
| 1222 |
+
""
|
| 1223 |
+
s num.names$ 'numnames :=
|
| 1224 |
+
numnames 'namesleft :=
|
| 1225 |
+
{ namesleft #0 > }
|
| 1226 |
+
{
|
| 1227 |
+
s nameptr "{vv{ } }{ll{ }}{ f{ }}{ jj{ }}" format.name$ 't :=
|
| 1228 |
+
nameptr #1 >
|
| 1229 |
+
{
|
| 1230 |
+
" " *
|
| 1231 |
+
namesleft #1 = t "others" = and
|
| 1232 |
+
{ "zzzzz" * }
|
| 1233 |
+
{ numnames #2 > nameptr #2 = and
|
| 1234 |
+
{ "zz" * year field.or.null * " " * }
|
| 1235 |
+
'skip$
|
| 1236 |
+
if$
|
| 1237 |
+
t sortify *
|
| 1238 |
+
}
|
| 1239 |
+
if$
|
| 1240 |
+
}
|
| 1241 |
+
{ t sortify * }
|
| 1242 |
+
if$
|
| 1243 |
+
nameptr #1 + 'nameptr :=
|
| 1244 |
+
namesleft #1 - 'namesleft :=
|
| 1245 |
+
}
|
| 1246 |
+
while$
|
| 1247 |
+
}
|
| 1248 |
+
|
| 1249 |
+
FUNCTION {sort.format.title}
|
| 1250 |
+
{ 't :=
|
| 1251 |
+
"A " #2
|
| 1252 |
+
"An " #3
|
| 1253 |
+
"The " #4 t chop.word
|
| 1254 |
+
chop.word
|
| 1255 |
+
chop.word
|
| 1256 |
+
sortify
|
| 1257 |
+
#1 global.max$ substring$
|
| 1258 |
+
}
|
| 1259 |
+
|
| 1260 |
+
FUNCTION {author.sort}
|
| 1261 |
+
{ author empty$
|
| 1262 |
+
{ key empty$
|
| 1263 |
+
{ "to sort, need author or key in " cite$ * warning$
|
| 1264 |
+
""
|
| 1265 |
+
}
|
| 1266 |
+
            { key sortify }
          if$
        }
        { author sort.format.names }
      if$
}

FUNCTION {author.editor.sort}
{ author empty$
    { editor empty$
        { key empty$
            { "to sort, need author, editor, or key in " cite$ * warning$
              ""
            }
            { key sortify }
          if$
        }
        { editor sort.format.names }
      if$
    }
    { author sort.format.names }
  if$
}

FUNCTION {author.organization.sort}
{ author empty$
    { organization empty$
        { key empty$
            { "to sort, need author, organization, or key in " cite$ * warning$
              ""
            }
            { key sortify }
          if$
        }
        { "The " #4 organization chop.word sortify }
      if$
    }
    { author sort.format.names }
  if$
}

FUNCTION {editor.organization.sort}
{ editor empty$
    { organization empty$
        { key empty$
            { "to sort, need editor, organization, or key in " cite$ * warning$
              ""
            }
            { key sortify }
          if$
        }
        { "The " #4 organization chop.word sortify }
      if$
    }
    { editor sort.format.names }
  if$
}


FUNCTION {presort}
{ calc.label
  label sortify
  " "
  *
  type$ "book" =
  type$ "inbook" =
  or
    'author.editor.sort
    { type$ "proceedings" =
        'editor.organization.sort
        { type$ "manual" =
            'author.organization.sort
            'author.sort
          if$
        }
      if$
    }
  if$
  " "
  *
  year field.or.null sortify
  *
  " "
  *
  cite$
  *
  #1 entry.max$ substring$
  'sort.label :=
  sort.label *
  #1 entry.max$ substring$
  'sort.key$ :=
}

ITERATE {presort}

SORT

STRINGS { longest.label last.label next.extra }

INTEGERS { longest.label.width last.extra.num number.label }

FUNCTION {initialize.longest.label}
{ "" 'longest.label :=
  #0 int.to.chr$ 'last.label :=
  "" 'next.extra :=
  #0 'longest.label.width :=
  #0 'last.extra.num :=
  #0 'number.label :=
}

FUNCTION {forward.pass}
{ last.label label =
    { last.extra.num #1 + 'last.extra.num :=
      last.extra.num int.to.chr$ 'extra.label :=
    }
    { "a" chr.to.int$ 'last.extra.num :=
      "" 'extra.label :=
      label 'last.label :=
    }
  if$
  number.label #1 + 'number.label :=
}

FUNCTION {reverse.pass}
{ next.extra "b" =
    { "a" 'extra.label := }
    'skip$
  if$
  extra.label 'next.extra :=
  extra.label
  duplicate$ empty$
    'skip$
    { "{\natexlab{" swap$ * "}}" * }
  if$
  'extra.label :=
  label extra.label * 'label :=
}

EXECUTE {initialize.longest.label}

ITERATE {forward.pass}

REVERSE {reverse.pass}

FUNCTION {bib.sort.order}
{ sort.label 'sort.key$ :=
}

ITERATE {bib.sort.order}

SORT

FUNCTION {begin.bib}
{ preamble$ empty$
    'skip$
    { preamble$ write$ newline$ }
  if$
  "\begin{thebibliography}{" number.label int.to.str$ * "}" *
  write$ newline$
  "\providecommand{\natexlab}[1]{#1}"
  write$ newline$
  "\providecommand{\url}[1]{\texttt{#1}}"
  write$ newline$
  "\expandafter\ifx\csname urlstyle\endcsname\relax"
  write$ newline$
  "  \providecommand{\doi}[1]{doi: #1}\else"
  write$ newline$
  "  \providecommand{\doi}{doi: \begingroup \urlstyle{rm}\Url}\fi"
  write$ newline$
}

EXECUTE {begin.bib}

EXECUTE {init.state.consts}

ITERATE {call.type$}

FUNCTION {end.bib}
{ newline$
  "\end{thebibliography}" write$ newline$
}

EXECUTE {end.bib}
SVTRv2: CTC Beats Encoder-Decoder Models in Scene Text Recognition/main.bbl
ADDED
@@ -0,0 +1,343 @@
\begin{thebibliography}{67}
\providecommand{\natexlab}[1]{#1}
\providecommand{\url}[1]{\texttt{#1}}
\expandafter\ifx\csname urlstyle\endcsname\relax
  \providecommand{\doi}[1]{doi: #1}\else
  \providecommand{\doi}{doi: \begingroup \urlstyle{rm}\Url}\fi

\bibitem[Anhar et~al.(2014)Anhar, Palaiahnakote, Chan, and Tan]{Risnumawan2014cute}
R. Anhar, S. Palaiahnakote, C.~S. Chan, and C.~L. Tan.
\newblock A robust arbitrary text detection system for natural scene images.
\newblock \emph{Expert Syst. Appl.}, 41\penalty0 (18):\penalty0 8027--8048, 2014.

\bibitem[Baek et~al.(2019)Baek, Kim, Lee, Park, Han, Yun, Oh, and Lee]{whatwrong}
J. Baek, G. Kim, J. Lee, S. Park, D. Han, S. Yun, S.~J. Oh, and H. Lee.
\newblock What is wrong with scene text recognition model comparisons? dataset and model analysis.
\newblock In \emph{ICCV}, pages 4714--4722, 2019.

\bibitem[Bao et~al.(2022)Bao, Dong, Piao, and Wei]{Bao0PW22_beit}
H. Bao, L. Dong, S. Piao, and F. Wei.
\newblock {BEiT}: {BERT} pre-training of image transformers.
\newblock In \emph{ICLR}, 2022.

\bibitem[Bautista and Atienza(2022)]{BautistaA22PARSeq}
D. Bautista and R. Atienza.
\newblock Scene text recognition with permuted autoregressive sequence models.
\newblock In \emph{ECCV}, pages 178--196, 2022.

\bibitem[Chen et~al.(2021{\natexlab{a}})Chen, Li, and Xue]{cvpr2021TransOCR}
J. Chen, B. Li, and X. Xue.
\newblock {Scene Text Telescope}: Text-focused scene image super-resolution.
\newblock In \emph{CVPR}, pages 12021--12030, 2021{\natexlab{a}}.

\bibitem[Chen et~al.(2021{\natexlab{b}})Chen, Yu, Ma, Guan, Xu, Wang, Qu, Li, and Xue]{chen2021benchmarking}
J. Chen, H. Yu, J. Ma, M. Guan, X. Xu, X. Wang, S. Qu, B. Li, and X. Xue.
\newblock Benchmarking {Chinese} text recognition: Datasets, baselines, and an empirical study.
\newblock \emph{CoRR}, abs/2112.15093, 2021{\natexlab{b}}.

\bibitem[Chen et~al.(2022)Chen, Jin, Zhu, Luo, and Wang]{ChenJZLW21str_survey}
X. Chen, L. Jin, Y. Zhu, C. Luo, and T. Wang.
\newblock Text recognition in the wild: {A} survey.
\newblock \emph{{ACM} Comput. Surv.}, 54\penalty0 (2):\penalty0 42:1--42:35, 2022.

\bibitem[Cheng et~al.(2023)Cheng, Wang, Da, Zheng, and Yao]{iccv2023lister}
C. Cheng, P. Wang, C. Da, Q. Zheng, and C. Yao.
\newblock {LISTER}: Neighbor decoding for length-insensitive scene text recognition.
\newblock In \emph{ICCV}, pages 19484--19494, 2023.

\bibitem[Da et~al.(2022)Da, Wang, and Yao]{levocr}
C. Da, P. Wang, and C. Yao.
\newblock {Levenshtein OCR}.
\newblock In \emph{ECCV}, pages 322--338, 2022.

\bibitem[Dosovitskiy et~al.(2021)Dosovitskiy, Beyer, Kolesnikov, Weissenborn, Zhai, Unterthiner, Dehghani, Minderer, Heigold, Gelly, Uszkoreit, and Houlsby]{dosovitskiy2020vit}
A. Dosovitskiy, L. Beyer, A. Kolesnikov, D. Weissenborn, X. Zhai, T. Unterthiner, M. Dehghani, M. Minderer, G. Heigold, S. Gelly, J. Uszkoreit, and N. Houlsby.
\newblock An image is worth 16x16 words: Transformers for image recognition at scale.
\newblock In \emph{ICLR}, 2021.

\bibitem[Du et~al.(2022)Du, Chen, Jia, Yin, Zheng, Li, Du, and Jiang]{duijcai2022svtr}
Y. Du, Z. Chen, C. Jia, X. Yin, T. Zheng, C. Li, Y. Du, and Y.-G. Jiang.
\newblock {SVTR}: Scene text recognition with a single visual model.
\newblock In \emph{IJCAI}, pages 884--890, 2022.

\bibitem[Du et~al.(2025{\natexlab{a}})Du, Chen, Jia, Gao, and Jiang]{du2024smtr}
Y. Du, Z. Chen, C. Jia, X. Gao, and Y.-G. Jiang.
\newblock Out of length text recognition with sub-string matching.
\newblock In \emph{AAAI}, pages 2798--2806, 2025{\natexlab{a}}.

\bibitem[Du et~al.(2025{\natexlab{b}})Du, Chen, Jia, Yin, Li, Du, and Jiang]{du2023cppd}
Y. Du, Z. Chen, C. Jia, X. Yin, C. Li, Y. Du, and Y.-G. Jiang.
\newblock Context perception parallel decoder for scene text recognition.
\newblock \emph{IEEE Trans. Pattern Anal. Mach. Intell.}, 47\penalty0 (6):\penalty0 4668--4683, 2025{\natexlab{b}}.

\bibitem[Du et~al.(2025{\natexlab{c}})Du, Chen, Su, Jia, and Jiang]{du2024igtr}
Y. Du, Z. Chen, Y. Su, C. Jia, and Y.-G. Jiang.
\newblock Instruction-guided scene text recognition.
\newblock \emph{IEEE Trans. Pattern Anal. Mach. Intell.}, 47\penalty0 (4):\penalty0 2723--2738, 2025{\natexlab{c}}.

\bibitem[Fang et~al.(2021)Fang, Xie, Wang, Mao, and Zhang]{fang2021abinet}
S. Fang, H. Xie, Y. Wang, Z. Mao, and Y. Zhang.
\newblock {Read Like Humans}: Autonomous, bidirectional and iterative language modeling for scene text recognition.
\newblock In \emph{CVPR}, pages 7098--7107, 2021.

\bibitem[Fang et~al.(2023)Fang, Mao, Xie, Wang, Yan, and Zhang]{TPAMI2022ABINetPP}
S. Fang, Z. Mao, H. Xie, Y. Wang, C. Yan, and Y. Zhang.
\newblock {ABINet}++: Autonomous, bidirectional and iterative language modeling for scene text spotting.
\newblock \emph{IEEE Trans. Pattern Anal. Mach. Intell.}, 45\penalty0 (6):\penalty0 7123--7141, 2023.

\bibitem[Graves et~al.(2006)Graves, Fern\'{a}ndez, Gomez, and Schmidhuber]{CTC}
A. Graves, S. Fern\'{a}ndez, F. Gomez, and J. Schmidhuber.
\newblock Connectionist temporal classification: Labelling unsegmented sequence data with recurrent neural networks.
\newblock In \emph{ICML}, pages 369--376, 2006.

\bibitem[Guan et~al.(2023{\natexlab{a}})Guan, Gu, Tu, Yang, Feng, Zhao, and Shen]{Guan_2023_CVPR_SIGA}
T. Guan, C. Gu, J. Tu, X. Yang, Q. Feng, Y. Zhao, and W. Shen.
\newblock {Self-Supervised} implicit glyph attention for text recognition.
\newblock In \emph{CVPR}, pages 15285--15294, 2023{\natexlab{a}}.

\bibitem[Guan et~al.(2023{\natexlab{b}})Guan, Shen, Yang, Feng, Jiang, and Yang]{Guan_2023_ICCV_CCD}
T. Guan, W. Shen, X. Yang, Q. Feng, Z. Jiang, and X. Yang.
\newblock {Self-Supervised Character-to-Character} distillation for text recognition.
\newblock In \emph{ICCV}, pages 19473--19484, 2023{\natexlab{b}}.

\bibitem[Gupta et~al.(2016)Gupta, Vedaldi, and Zisserman]{Synthetic}
A. Gupta, A. Vedaldi, and A. Zisserman.
\newblock Synthetic data for text localisation in natural images.
\newblock In \emph{CVPR}, pages 2315--2324, 2016.

\bibitem[He et~al.(2016)He, Zhang, Ren, and Sun]{he2016resnet}
K. He, X. Zhang, S. Ren, and J. Sun.
\newblock Deep residual learning for image recognition.
\newblock In \emph{CVPR}, pages 770--778, 2016.

\bibitem[He et~al.(2022)He, Chen, Xie, Li, Doll{\'{a}}r, and Girshick]{HeCXLDG22_mae}
K. He, X. Chen, S. Xie, Y. Li, P. Doll{\'{a}}r, and R.~B. Girshick.
\newblock Masked autoencoders are scalable vision learners.
\newblock In \emph{CVPR}, pages 15979--15988, 2022.

\bibitem[Hu et~al.(2020)Hu, Cai, Hou, Yi, and Lin]{hu2020gtc}
W. Hu, X. Cai, J. Hou, S. Yi, and Z. Lin.
\newblock {GTC}: Guided training of {CTC} towards efficient and accurate scene text recognition.
\newblock In \emph{AAAI}, pages 11005--11012, 2020.

\bibitem[Jaderberg et~al.(2014)Jaderberg, Simonyan, Vedaldi, and Zisserman]{jaderberg14synthetic}
M. Jaderberg, K. Simonyan, A. Vedaldi, and A. Zisserman.
\newblock Synthetic data and artificial neural networks for natural scene text recognition.
\newblock \emph{CoRR}, abs/1406.2227, 2014.

\bibitem[Jiang et~al.(2023)Jiang, Wang, Peng, Liu, and Jin]{jiang2023revisiting}
Q. Jiang, J. Wang, D. Peng, C. Liu, and L. Jin.
\newblock Revisiting scene text recognition: A data perspective.
\newblock In \emph{ICCV}, pages 20486--20497, 2023.

\bibitem[Karatzas et~al.(2015)Karatzas, Gomez-Bigorda, Nicolaou, Ghosh, Bagdanov, Iwamura, Matas, Neumann, Chandrasekhar, Lu, Shafait, Uchida, and Valveny]{icdar2015}
D. Karatzas, L. Gomez-Bigorda, A. Nicolaou, S. Ghosh, A. Bagdanov, M. Iwamura, J. Matas, L. Neumann, V.~R. Chandrasekhar, S. Lu, F. Shafait, S. Uchida, and E. Valveny.
\newblock {ICDAR} 2015 competition on robust reading.
\newblock In \emph{ICDAR}, pages 1156--1160, 2015.

\bibitem[Karatzas et~al.(2013)Karatzas, Shafait, Uchida, Iwamura, i.~Bigorda, Mestre, Mas, Mota, Almazàn, and de~las Heras]{icdar2013}
D. Karatzas, F. Shafait, S. Uchida, M. Iwamura, L.~G. i. Bigorda, S.~R. Mestre, J. Mas, D.~F. Mota, J.~A. Almazàn, and L.~P. de~las Heras.
\newblock {ICDAR} 2013 robust reading competition.
\newblock In \emph{ICDAR}, pages 1484--1493, 2013.

\bibitem[Li et~al.(2022)Li, Liu, Guo, Yin, Jiang, Du, Du, Zhu, Lai, Hu, Yu, and Ma]{ppocrv3}
C. Li, W. Liu, R. Guo, X. Yin, K. Jiang, Y. Du, Y. Du, L. Zhu, B. Lai, X. Hu, D. Yu, and Y. Ma.
\newblock {PP-OCRv3}: More attempts for the improvement of ultra lightweight {OCR} system.
\newblock \emph{CoRR}, abs/2206.03001, 2022.

\bibitem[Li et~al.(2019)Li, Wang, Shen, and Zhang]{li2019sar}
H. Li, P. Wang, C. Shen, and G. Zhang.
\newblock Show, attend and read: A simple and strong baseline for irregular text recognition.
\newblock In \emph{AAAI}, pages 8610--8617, 2019.

\bibitem[Loshchilov and Hutter(2019)]{adamw}
I. Loshchilov and F. Hutter.
\newblock Decoupled weight decay regularization.
\newblock In \emph{ICLR}, 2019.

\bibitem[Lu et~al.(2021)Lu, Yu, Qi, Chen, Gong, Xiao, and Bai]{pr2021MASTER}
N. Lu, W. Yu, X. Qi, Y. Chen, P. Gong, R. Xiao, and X. Bai.
\newblock {MASTER}: Multi-aspect non-local network for scene text recognition.
\newblock \emph{Pattern Recognit.}, 117:\penalty0 107980, 2021.

\bibitem[Luo et~al.(2019)Luo, Jin, and Sun]{pr2019MORAN}
C. Luo, L. Jin, and Z. Sun.
\newblock {MORAN}: A multi-object rectified attention network for scene text recognition.
\newblock \emph{Pattern Recognit.}, 90:\penalty0 109--118, 2019.

\bibitem[Mishra et~al.(2012)Mishra, Karteek, and Jawahar]{IIIT5K}
A. Mishra, A. Karteek, and C.~V. Jawahar.
\newblock Scene text recognition using higher order language priors.
\newblock In \emph{BMVC}, pages 1--11, 2012.

\bibitem[Na et~al.(2022)Na, Kim, and Park]{MATRN}
B. Na, Y. Kim, and S. Park.
\newblock {Multi-modal Text Recognition Networks}: Interactive enhancements between visual and semantic features.
\newblock In \emph{ECCV}, pages 446--463, 2022.

\bibitem[Phan et~al.(2013)Phan, Shivakumara, Tian, and Tan]{SVTP}
T.~Q. Phan, P. Shivakumara, S. Tian, and C.~L. Tan.
\newblock Recognizing text with perspective distortion in natural scenes.
\newblock In \emph{CVPR}, pages 569--576, 2013.

\bibitem[Qiao et~al.(2020)Qiao, Zhou, Yang, Zhou, and Wang]{cvpr2020seed}
Z. Qiao, Y. Zhou, D. Yang, Y. Zhou, and W. Wang.
\newblock {SEED}: Semantics enhanced encoder-decoder framework for scene text recognition.
\newblock In \emph{CVPR}, pages 13525--13534, 2020.

\bibitem[Rang et~al.(2024)Rang, Bi, Liu, Wang, and Han]{Rang_2024_CVPR_clip4str}
M. Rang, Z. Bi, C. Liu, Y. Wang, and K. Han.
\newblock An empirical study of scaling law for scene text recognition.
\newblock In \emph{CVPR}, pages 15619--15629, 2024.

\bibitem[Sheng et~al.(2019)Sheng, Chen, and Xu]{Sheng2019nrtr}
F. Sheng, Z. Chen, and B. Xu.
\newblock {NRTR}: A no-recurrence sequence-to-sequence model for scene text recognition.
\newblock In \emph{ICDAR}, pages 781--786, 2019.

\bibitem[Shi et~al.(2017)Shi, Bai, and Yao]{shi2017crnn}
B. Shi, X. Bai, and C. Yao.
\newblock An end-to-end trainable neural network for image-based sequence recognition and its application to scene text recognition.
\newblock \emph{IEEE Trans. Pattern Anal. Mach. Intell.}, 39\penalty0 (11):\penalty0 2298--2304, 2017.

\bibitem[Shi et~al.(2019)Shi, Yang, Wang, Lyu, Yao, and Bai]{shi2019aster}
B. Shi, M. Yang, X. Wang, P. Lyu, C. Yao, and X. Bai.
\newblock {ASTER}: An attentional scene text recognizer with flexible rectification.
\newblock \emph{IEEE Trans. Pattern Anal. Mach. Intell.}, 41\penalty0 (9):\penalty0 2035--2048, 2019.

\bibitem[Vaswani et~al.(2017)Vaswani, Shazeer, Parmar, Uszkoreit, Jones, Gomez, Kaiser, and Polosukhin]{NIPS2017_attn}
A. Vaswani, N. Shazeer, N. Parmar, J. Uszkoreit, L. Jones, A. Gomez, \L. Kaiser, and I. Polosukhin.
\newblock Attention is all you need.
\newblock In \emph{NIPS}, pages 5998--6008, 2017.

\bibitem[\vspace{0mm}I. Loshchilov and Hutter(2017)]{cosine}
\vspace{0mm}I. Loshchilov and F. Hutter.
\newblock {SGDR:} stochastic gradient descent with warm restarts.
\newblock In \emph{ICLR}, 2017.

\bibitem[Wan et~al.(2019)Wan, Xie, Liu, Bai, and Yao]{arxiv2019_2dctc}
Z. Wan, F. Xie, Y. Liu, X. Bai, and C. Yao.
\newblock {2D-CTC} for scene text recognition.
\newblock \emph{CoRR}, abs/1907.09705, 2019.

\bibitem[Wang et~al.(2011)Wang, Babenko, and Belongie]{Wang2011SVT}
K. Wang, B. Babenko, and S. Belongie.
\newblock End-to-end scene text recognition.
\newblock In \emph{ICCV}, pages 1457--1464, 2011.

\bibitem[Wang et~al.(2022{\natexlab{a}})Wang, Da, and Yao]{mgpstr}
P. Wang, C. Da, and C. Yao.
\newblock {Multi-Granularity Prediction} for scene text recognition.
\newblock In \emph{ECCV}, pages 339--355, 2022{\natexlab{a}}.

\bibitem[Wang et~al.(2020)Wang, Zhu, Jin, Luo, Chen, Wu, Wang, and Cai]{wang2020aaai_dan}
T. Wang, Y. Zhu, L. Jin, C. Luo, X. Chen, Y. Wu, Q. Wang, and M. Cai.
\newblock Decoupled attention network for text recognition.
\newblock In \emph{AAAI}, pages 12216--12224, 2020.

\bibitem[Wang et~al.(2021)Wang, Xie, Fang, Wang, Zhu, and Zhang]{Wang_2021_visionlan}
Y. Wang, H. Xie, S. Fang, J. Wang, S. Zhu, and Y. Zhang.
\newblock {From Two to One}: A new scene text recognizer with visual language modeling network.
\newblock In \emph{ICCV}, pages 14194--14203, 2021.

\bibitem[Wang et~al.(2022{\natexlab{b}})Wang, Xie, Fang, Xing, Wang, Zhu, and Zhang]{wang2022tip_PETR}
Y. Wang, H. Xie, S. Fang, M. Xing, J. Wang, S. Zhu, and Y. Zhang.
\newblock {PETR}: Rethinking the capability of transformer-based language model in scene text recognition.
\newblock \emph{IEEE Trans. Image Process.}, 31:\penalty0 5585--5598, 2022{\natexlab{b}}.

\bibitem[Wei et~al.(2024)Wei, Zhan, Lu, Tu, Yin, Liu, and Pal]{Wei_2024_busnet}
J. Wei, H. Zhan, Y. Lu, X. Tu, B. Yin, C. Liu, and U. Pal.
\newblock Image as a language: Revisiting scene text recognition via balanced, unified and synchronized vision-language reasoning network.
\newblock In \emph{AAAI}, pages 5885--5893, 2024.

\bibitem[Woo et~al.(2023)Woo, Debnath, Hu, Chen, Liu, Kweon, and Xie]{WooDHC0KX23_ConvNeXtv2}
S. Woo, S. Debnath, R. Hu, X. Chen, Z. Liu, I.~S. Kweon, and S. Xie.
\newblock {ConvNeXt} {V2:} co-designing and scaling convnets with masked autoencoders.
\newblock In \emph{CVPR}, pages 16133--16142, 2023.

\bibitem[Xie et~al.(2022)Xie, Fu, Zhang, Wang, and Bai]{xie2022toward_cornertrans}
X. Xie, L. Fu, Z. Zhang, Z. Wang, and X. Bai.
\newblock {Toward Understanding WordArt}: Corner-guided transformer for scene text recognition.
\newblock In \emph{ECCV}, pages 303--321, 2022.

\bibitem[Xu et~al.(2024)Xu, Wang, Xie, and Zhang]{Xu_2024_CVPR_OTE}
J. Xu, Y. Wang, H. Xie, and Y. Zhang.
\newblock {OTE}: Exploring accurate scene text recognition using one token.
\newblock In \emph{CVPR}, pages 28327--28336, 2024.

\bibitem[Yang et~al.(2022)Yang, Li, Dai, and Gao]{YangLDG22focalnet}
J. Yang, C. Li, X. Dai, and J. Gao.
\newblock Focal modulation networks.
\newblock In \emph{NeurIPS}, pages 4203--4217, 2022.

\bibitem[Yang et~al.(2024)Yang, Yang, Liao, Zhu, and Bai]{yang2024class_cam}
M. Yang, B. Yang, M. Liao, Y. Zhu, and X. Bai.
\newblock Class-aware mask-guided feature refinement for scene text recognition.
\newblock \emph{Pattern Recognit.}, 149:\penalty0 110244, 2024.

\bibitem[Yu et~al.(2020)Yu, Li, Zhang, Liu, Han, Liu, and Ding]{yu2020srn}
D. Yu, X. Li, C. Zhang, T. Liu, J. Han, J. Liu, and E. Ding.
\newblock Towards accurate scene text recognition with semantic reasoning networks.
\newblock In \emph{CVPR}, pages 12113--12122, 2020.

\bibitem[Yu et~al.(2023)Yu, Wang, Li, and Xue]{yuICCV2023clipctr}
H. Yu, X. Wang, B. Li, and X. Xue.
\newblock Chinese text recognition with a pre-trained {CLIP-Like} model through image-{IDS} aligning.
\newblock In \emph{ICCV}, pages 11909--11918, 2023.

\bibitem[Yue et~al.(2020)Yue, Kuang, Lin, Sun, and Zhang]{yue2020robustscanner}
X. Yue, Z. Kuang, C. Lin, H. Sun, and W. Zhang.
\newblock {RobustScanner}: Dynamically enhancing positional clues for robust text recognition.
\newblock In \emph{ECCV}, pages 135--151, 2020.

\bibitem[Zhang et~al.(2023)Zhang, Xie, Wang, Xu, and Zhang]{ijcai2023LPV}
B. Zhang, H. Xie, Y. Wang, J. Xu, and Y. Zhang.
\newblock {Linguistic More}: Taking a further step toward efficient and accurate scene text recognition.
\newblock In \emph{IJCAI}, pages 1704--1712, 2023.

\bibitem[Zhang et~al.(2020)Zhang, Yao, Yang, Xu, and Bai]{zhang2020autostr}
H. Zhang, Q. Yao, M. Yang, Y. Xu, and X. Bai.
\newblock {AutoSTR}: Efficient backbone search for scene text recognition.
\newblock In \emph{ECCV}, pages 751--767. Springer, 2020.

\bibitem[Zhang et~al.(2024)Zhang, Lu, Liao, Huang, Li, Wang, and Peng]{Zhang_Lu_Liao_Huang_Li_Wang_Peng_2024_DCTC}
Z. Zhang, N. Lu, M. Liao, Y. Huang, C. Li, M. Wang, and W. Peng.
\newblock Self-distillation regularized connectionist temporal classification loss for text recognition: A simple yet effective approach.
\newblock In \emph{AAAI}, pages 7441--7449, 2024.

\bibitem[Zhao et~al.(2024{\natexlab{a}})Zhao, Du, Chen, and Jiang]{zhao_2024_acmmm_dptr}
S. Zhao, Y. Du, Z. Chen, and Y.-G. Jiang.
\newblock Decoder pre-training with only text for scene text recognition.
\newblock In \emph{ACM MM}, pages 5191--5200, 2024{\natexlab{a}}.

\bibitem[Zhao et~al.(2024{\natexlab{b}})Zhao, Quan, Zhu, and Yang]{zhao_2025_tip_clip4str}
S. Zhao, R. Quan, L. Zhu, and Y. Yang.
\newblock {CLIP4STR}: A simple baseline for scene text recognition with pre-trained vision-language model.
\newblock \emph{IEEE Trans. Image Process.}, 33:\penalty0 6893--6904, 2024{\natexlab{b}}.

\bibitem[Zhao et~al.(2024{\natexlab{c}})Zhao, Tang, Lin, Wu, Huang, Liu, Tan, Zhang, and Xie]{Zhao_2024_CVPR_E2STR}
Z. Zhao, J. Tang, C. Lin, B. Wu, C. Huang, H. Liu, X. Tan, Z. Zhang, and Y. Xie.
\newblock Multi-modal in-context learning makes an ego-evolving scene text recognizer.
\newblock In \emph{CVPR}, pages 15567--15576, 2024{\natexlab{c}}.

\bibitem[Zheng et~al.(2023)Zheng, Chen, Bai, Xie, and Jiang]{zheng2023tps++}
T. Zheng, Z. Chen, J. Bai, H. Xie, and Y.-G. Jiang.
\newblock {TPS++}: Attention-enhanced thin-plate spline for scene text recognition.
\newblock In \emph{IJCAI}, pages 1777--1785, 2023.

\bibitem[Zheng et~al.(2024)Zheng, Chen, Fang, Xie, and Jiang]{zheng2024cdistnet}
T. Zheng, Z. Chen, S. Fang, H. Xie, and Y.-G. Jiang.
\newblock {CDistNet}: Perceiving multi-domain character distance for robust text recognition.
\newblock \emph{Int. J. Comput. Vis.}, 132\penalty0 (2):\penalty0 300--318, 2024.

\bibitem[Zhong et~al.(2024)Zhong, Yang, Li, Wang, Tang, Cheng, and Yao]{zhong_2024_acmmm_vlreader}
H. Zhong, Z. Yang, Z. Li, P. Wang, J. Tang, W. Cheng, and C. Yao.
\newblock {VL-Reader}: Vision and language reconstructor is an effective scene text recognizer.
\newblock In \emph{ACM MM}, pages 4207--4216, 2024.

\bibitem[Zhou et~al.(2024)Zhou, Qu, Wang, Li, Zhang, and Xie]{zhou2024cff}
B. Zhou, Y. Qu, Z. Wang, Z. Li, B. Zhang, and H. Xie.
\newblock Focus on the whole character: Discriminative character modeling for scene text recognition.
\newblock In \emph{IJCAI}, pages 1762--1770, 2024.

\end{thebibliography}
SVTRv2: CTC Beats Encoder-Decoder Models in Scene Text Recognition/main.bib
ADDED
@@ -0,0 +1,1225 @@
|
| 657 |
+
pages={2231--2239},
|
| 658 |
+
year={2016}
|
| 659 |
+
}
|
| 660 |
+
|
| 661 |
+
@inproceedings{elsken2019nas,
|
| 662 |
+
title={Neural architecture search: A survey},
|
| 663 |
+
author={Elsken, T. and Metzen, J. H. and Hutter, F.},
|
| 664 |
+
booktitle={JMLR},
|
| 665 |
+
pages={1997--2017},
|
| 666 |
+
year={2019},
|
| 667 |
+
}
|
| 668 |
+
|
| 669 |
+
|
| 670 |
+
@inproceedings{dosovitskiy2020vit,
|
| 671 |
+
author = {Dosovitskiy, A. and Beyer, L. and Kolesnikov, A. and Weissenborn, D. and Zhai, X. and Unterthiner, T. and Dehghani, M. and Minderer, M. and Heigold, G. and Gelly, S. and Uszkoreit, J. and Houlsby, N.},
|
| 672 |
+
title = {An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale},
|
| 673 |
+
booktitle = {ICLR},
|
| 674 |
+
year = {2021}
|
| 675 |
+
}
|
| 676 |
+
|
| 677 |
+
|
| 678 |
+
|
| 679 |
+
@article{dong2021cswin,
|
| 680 |
+
title={{CSWin Transformer}: A General Vision Transformer Backbone with Cross-Shaped Windows},
|
| 681 |
+
author={Xiaoyi Dong and Jianmin Bao and Dongdong Chen and Weiming Zhang and Nenghai Yu and Lu Yuan and Dong Chen and Baining Guo},
|
| 682 |
+
primaryClass={cs.CV},
|
| 683 |
+
journal = {CoRR},
|
| 684 |
+
volume = {abs/2107.00652},
|
| 685 |
+
eprinttype = {arXiv},
|
| 686 |
+
year={2021}
|
| 687 |
+
}
|
| 688 |
+
|
| 689 |
+
@inproceedings{liu2021Swin,
|
| 690 |
+
title={{Swin Transformer}: Hierarchical Vision Transformer using Shifted Windows},
|
| 691 |
+
author={Liu, Ze and Lin, Yutong and Cao, Yue and Hu, Han and Wei, Yixuan and Zhang, Zheng and Lin, Stephen and Guo, Baining},
|
| 692 |
+
booktitle={ICCV},
|
| 693 |
+
pages={9992-10002},
|
| 694 |
+
year={2021}
|
| 695 |
+
}
|
| 696 |
+
|
| 697 |
+
@inproceedings{wang2021pvt,
|
| 698 |
+
title={{Pyramid Vision Transformer}: A Versatile Backbone for Dense Prediction without Convolutions},
|
| 699 |
+
author={Wang, W. and Xie, E. and Li, X. and Fan, D. and Song, K. and Liang, D. and Lu, T. and Luo, P. and Shao, L.},
|
| 700 |
+
booktitle={ICCV},
|
| 701 |
+
pages={548-558},
|
| 702 |
+
year={2021}
|
| 703 |
+
}
|
| 704 |
+
@InProceedings{Graham2021levit,
|
| 705 |
+
author = {Graham, B. and El-Nouby, A. and Touvron, H. and Stock, P. and Joulin, A. and Jegou, H. and Douze, M.},
|
| 706 |
+
title = {{LeViT}: A Vision Transformer in ConvNet's Clothing for Faster Inference},
|
| 707 |
+
booktitle = {ICCV},
|
| 708 |
+
year = {2021},
|
| 709 |
+
pages = {12259-12269}
|
| 710 |
+
}
|
| 711 |
+
|
| 712 |
+
|
| 713 |
+
@InProceedings{Touvron2021deit,
|
| 714 |
+
title = {Training data-efficient image transformers & distillation through attention},
|
| 715 |
+
author = {H. Touvron and M. Cord and M. Douze and F. Massa and A. Sablayrolles and H. J{\'e}gou},
|
| 716 |
+
booktitle = {ICML},
|
| 717 |
+
pages = {10347--10357},
|
| 718 |
+
year = {2021},
|
| 719 |
+
}
|
| 720 |
+
|
| 721 |
+
|
| 722 |
+
@INPROCEEDINGS{icdar2013, author={D. Karatzas and F. Shafait and S. Uchida and M. Iwamura and L. G. i. Bigorda and S. R. Mestre and J. Mas and D. F. Mota and J. A. Almazàn and L. P. de las Heras},
|
| 723 |
+
booktitle={ICDAR},
|
| 724 |
+
title={{ICDAR} 2013 Robust Reading Competition}, year={2013}, pages={1484-1493}, doi={10.1109/ICDAR.2013.221}
|
| 725 |
+
}
|
| 726 |
+
|
| 727 |
+
@inproceedings{shi2016rare,
|
| 728 |
+
title={Robust scene text recognition with automatic rectification},
|
| 729 |
+
author={Shi, B. and Wang, X. and Lyu, P. and Yao, C. and Bai, X.},
|
| 730 |
+
booktitle={CVPR},
|
| 731 |
+
pages={4168--4176},
|
| 732 |
+
year={2016}
|
| 733 |
+
}
|
| 734 |
+
|
| 735 |
+
@inproceedings{tang2021vst,
|
| 736 |
+
title={{Visual-Semantic} Transformer for Scene Text Recognition},
|
| 737 |
+
author={Tang, X. and Lai, Y. and Liu, Y. and Fu, Y. and Fang, R.},
|
| 738 |
+
booktitle={BMVC},
|
| 739 |
+
pages = {772},
|
| 740 |
+
year={2022}
|
| 741 |
+
}
|
| 742 |
+
@INPROCEEDINGS{icdar2015, author={Karatzas, D. and Gomez-Bigorda, L. and Nicolaou, A. and Ghosh, S. and Bagdanov, A. and Iwamura, M. and Matas, J. and Neumann, L. and Chandrasekhar, V. R. and Lu, S. and Shafait, F. and Uchida, S. and Valveny, E.}, booktitle={ICDAR}, title={{ICDAR} 2015 competition on Robust Reading}, year={2015}, pages={1156-1160}, doi={10.1109/ICDAR.2015.7333942}}
|
| 743 |
+
|
| 744 |
+
@inproceedings{IIIT5K,
|
| 745 |
+
title={Scene Text Recognition using Higher Order Language Priors},
|
| 746 |
+
author={A. Mishra and A. Karteek and C. V. Jawahar},
|
| 747 |
+
booktitle={BMVC},
|
| 748 |
+
pages={1-11},
|
| 749 |
+
year={2012}
|
| 750 |
+
}
|
| 751 |
+
|
| 752 |
+
@INPROCEEDINGS{SVTP, author={Phan, T. Q. and Shivakumara, P. and Tian, S. and Tan, C. L.}, booktitle={ICCV}, title={Recognizing Text with Perspective Distortion in Natural Scenes}, year={2013}, pages={569-576}, doi={10.1109/ICCV.2013.76}}
|
| 753 |
+
|
| 754 |
+
@inproceedings{Yang2019SymmetryConstrainedRN,
|
| 755 |
+
title={{Symmetry-Constrained} Rectification Network for Scene Text Recognition},
|
| 756 |
+
author={M. Yang and Y. Guan and M. Liao and X. He and K. Bian and S. Bai and C. Yao and X. Bai},
|
| 757 |
+
booktitle={ICCV},
|
| 758 |
+
year={2019},
|
| 759 |
+
pages={9146-9155}
|
| 760 |
+
}
|
| 761 |
+
|
| 762 |
+
@article{simonyan2014VGG,
|
| 763 |
+
title={Very deep convolutional networks for large-scale image recognition},
|
| 764 |
+
author={Simonyan, K. and Zisserman, A.},
|
| 765 |
+
journal = {CoRR},
|
| 766 |
+
volume = {abs/1409.1556},
|
| 767 |
+
eprinttype = {arXiv},
|
| 768 |
+
year={2014}
|
| 769 |
+
}
|
| 770 |
+
|
| 771 |
+
@inproceedings{he2016resnet,
|
| 772 |
+
title={Deep residual learning for image recognition},
|
| 773 |
+
author={He, K. and Zhang, X. and Ren, S. and Sun, J.},
|
| 774 |
+
booktitle={CVPR},
|
| 775 |
+
pages={770--778},
|
| 776 |
+
year={2016},
|
| 777 |
+
}
|
| 778 |
+
|
| 779 |
+
@article{howard2017mobilenets,
|
| 780 |
+
title={{MobileNets}: Efficient convolutional neural networks for mobile vision applications},
|
| 781 |
+
author={Howard, A. G and Zhu, M. and Chen, B. and Kalenichenko, D. and Wang, W. and Weyand, T. and Andreetto, M. and Adam, H.},
|
| 782 |
+
journal = {CoRR},
|
| 783 |
+
volume = {abs/1704.04861},
|
| 784 |
+
eprinttype = {arXiv},
|
| 785 |
+
year={2017}
|
| 786 |
+
}
|
| 787 |
+
|
| 788 |
+
@inproceedings{tan2019efficientnet,
|
| 789 |
+
title={{EfficientNet}: Rethinking model scaling for convolutional neural networks},
|
| 790 |
+
author={Tan, M. and Le, Q.},
|
| 791 |
+
booktitle={ICML},
|
| 792 |
+
pages={6105--6114},
|
| 793 |
+
year={2019},
|
| 794 |
+
|
| 795 |
+
}
|
| 796 |
+
|
| 797 |
+
@InProceedings{Liu_2021_ICCV,
|
| 798 |
+
author = {Liu, Z. and Lin, Y. and Cao, Y. and Hu, H. and Wei, Y. and Zhang, Z. and Lin, S. and Guo, B.},
|
| 799 |
+
title = {{Swin Transformer}: Hierarchical Vision Transformer Using Shifted Windows},
|
| 800 |
+
booktitle = {ICCV},
|
| 801 |
+
year = {2021},
|
| 802 |
+
pages = {10012-10022}
|
| 803 |
+
}
|
| 804 |
+
|
| 805 |
+
@inproceedings{vaswani2021scaling,
|
| 806 |
+
title={Scaling local self-attention for parameter efficient visual backbones},
|
| 807 |
+
author={Vaswani, A. and Ramachandran, P. and Srinivas, A. and Parmar, N. and Hechtman, B. and Shlens, J.},
|
| 808 |
+
booktitle={CVPR},
|
| 809 |
+
pages={12894--12904},
|
| 810 |
+
year={2021}
|
| 811 |
+
}
|
| 812 |
+
|
| 813 |
+
@INPROCEEDINGS{Wang2011SVT, author={Wang, K. and Babenko, B. and Belongie, S.}, booktitle={ICCV}, title={End-to-end scene text recognition}, year={2011}, pages={1457-1464}, doi={10.1109/ICCV.2011.6126402}}
|
| 814 |
+
|
| 815 |
+
@article{jaderberg14synthetic,
|
| 816 |
+
author = {M. Jaderberg and K. Simonyan and A. Vedaldi and A. Zisserman},
|
| 817 |
+
journal = {CoRR},
|
| 818 |
+
volume = {abs/1406.2227},
|
| 819 |
+
year = {2014},
|
| 820 |
+
title = {Synthetic Data and Artificial Neural Networks for Natural Scene Text Recognition},
|
| 821 |
+
}
|
| 822 |
+
|
| 823 |
+
@article{Jader2015Reading,
|
| 824 |
+
author = {M. Jaderberg and K. Simonyan and A. Vedaldi and A. Zisserman},
|
| 825 |
+
title = {Reading Text in the Wild with Convolutional Neural Networks},
|
| 826 |
+
journal = {Int. J. Comput. Vis.},
|
| 827 |
+
volume = {116},
|
| 828 |
+
number = {1},
|
| 829 |
+
pages = {1--20},
|
| 830 |
+
year = {2015}
|
| 831 |
+
}
|
| 832 |
+
|
| 833 |
+
@inproceedings{radford2021learning,
|
| 834 |
+
title={Learning transferable visual models from natural language supervision},
|
| 835 |
+
author={Radford, Alec and Kim, Jong Wook and Hallacy, Chris and Ramesh, Aditya and Goh, Gabriel and Agarwal, Sandhini and Sastry, Girish and Askell, Amanda and Mishkin, Pamela and Clark, Jack and others},
|
| 836 |
+
booktitle={ICML},
|
| 837 |
+
pages={8748--8763},
|
| 838 |
+
year={2021},
|
| 839 |
+
}
|
| 840 |
+
|
| 841 |
+
@article{awais2023foundational,
|
| 842 |
+
title={Foundational models defining a new era in vision: A survey and outlook},
|
| 843 |
+
author={Awais, Muhammad and Naseer, Muzammal and Khan, Salman and Anwer, Rao Muhammad and Cholakkal, Hisham and Shah, Mubarak and Yang, Ming-Hsuan and Khan, Fahad Shahbaz},
|
| 844 |
+
journal = {CoRR},
|
| 845 |
+
volume = {abs/2307.13721},
|
| 846 |
+
eprinttype = {arXiv},
|
| 847 |
+
year={2023}
|
| 848 |
+
}
|
| 849 |
+
|
| 850 |
+
@inproceedings{Synthetic,
|
| 851 |
+
author = {Gupta, A. and Vedaldi, A. and Zisserman, A.},
|
| 852 |
+
year = {2016},
|
| 853 |
+
pages = {2315-2324},
|
| 854 |
+
title = {Synthetic Data for Text Localisation in Natural Images},
|
| 855 |
+
booktitle={CVPR},
|
| 856 |
+
}
|
| 857 |
+
@article{Risnumawan2014cute,
|
| 858 |
+
author = {Anhar, R. and Palaiahnakote, S. and Chan, C. S. and Tan, C. L.},
|
| 859 |
+
year = {2014},
|
| 860 |
+
title = {A robust arbitrary text detection system for natural scene images},
|
| 861 |
+
volume={41},
|
| 862 |
+
number={18},
|
| 863 |
+
journal = {Expert Syst. Appl.},
|
| 864 |
+
pages = {8027–8048}
|
| 865 |
+
}
|
| 866 |
+
|
| 867 |
+
@inproceedings{Fedor2018Rosetta,
|
| 868 |
+
author = {Borisyuk, F. and Gordo, A. and Sivakumar, V.},
|
| 869 |
+
title = {Rosetta: Large Scale System for Text Detection and Recognition in Images},
|
| 870 |
+
year = {2018},
|
| 871 |
+
booktitle = {ACM SIGKDD},
|
| 872 |
+
pages = {71–79}
|
| 873 |
+
}
|
| 874 |
+
|
| 875 |
+
@article{atienza2021vitstr, author={Atienza, R.},
|
| 876 |
+
title={Vision Transformer for Fast and Efficient Scene Text Recognition},
|
| 877 |
+
journal = {CoRR},
|
| 878 |
+
volume = {abs/2105.08582},
|
| 879 |
+
eprinttype = {arXiv},
|
| 880 |
+
year={2021}}
|
| 881 |
+
|
| 882 |
+
|
| 883 |
+
@InProceedings{Wang_2021_visionlan,
|
| 884 |
+
author = {Wang, Y. and Xie, H. and Fang, S. and Wang, J. and Zhu, S. and Zhang, Y.},
|
| 885 |
+
title = {{From Two to One}: A New Scene Text Recognizer With Visual Language Modeling Network},
|
| 886 |
+
booktitle = {ICCV},
|
| 887 |
+
year = {2021},
|
| 888 |
+
pages = {14194-14203}
|
| 889 |
+
}
|
| 890 |
+
|
| 891 |
+
@inproceedings{Qiao2020SEEDSE,
|
| 892 |
+
title={{SEED}: Semantics Enhanced Encoder-Decoder Framework for Scene Text Recognition},
|
| 893 |
+
author={Z. Qiao and Y. Zhou and D. Yang and Y. Zhou and W. Wang},
|
| 894 |
+
booktitle={CVPR},
|
| 895 |
+
year={2020},
|
| 896 |
+
pages={13525-13534}
|
| 897 |
+
}
|
| 898 |
+
@inproceedings{Baek2019WhatIW,
|
| 899 |
+
title={What Is Wrong With Scene Text Recognition Model Comparisons? Dataset and Model Analysis},
|
| 900 |
+
author={Baek, J. and Kim, G. and Lee, J. and Park, S. and Han, D. and Yun, S. and Oh, S. J. and Lee, H.},
|
| 901 |
+
booktitle={ICCV},
|
| 902 |
+
year={2019},
|
| 903 |
+
pages={4714-4722}
|
| 904 |
+
}
|
| 905 |
+
|
| 906 |
+
@InProceedings{Rang_2024_CVPR_clip4str,
|
| 907 |
+
author = {Rang, M. and Bi, Z. and Liu, C. and Wang, Y. and Han, K.},
|
| 908 |
+
title = {An Empirical Study of Scaling Law for Scene Text Recognition},
|
| 909 |
+
booktitle = {CVPR},
|
| 910 |
+
month = {June},
|
| 911 |
+
year = {2024},
|
| 912 |
+
pages = {15619-15629}
|
| 913 |
+
}
|
| 914 |
+
|
| 915 |
+
@inproceedings{WooDHC0KX23_ConvNeXtv2,
|
| 916 |
+
author = {S. Woo and
|
| 917 |
+
S. Debnath and
|
| 918 |
+
R. Hu and
|
| 919 |
+
X. Chen and
|
| 920 |
+
Z. Liu and
|
| 921 |
+
I. S. Kweon and
|
| 922 |
+
S. Xie},
|
| 923 |
+
title = {ConvNeXt {V2:} Co-designing and Scaling ConvNets with Masked Autoencoders},
|
| 924 |
+
booktitle = {CVPR},
|
| 925 |
+
pages = {16133--16142},
|
| 926 |
+
year = {2023},
|
| 927 |
+
}
|
| 928 |
+
|
| 929 |
+
@inproceedings{Zhai2016Chpr,
|
| 930 |
+
title={Chinese Image Text Recognition with BLSTM-CTC: A Segmentation-Free Method},
|
| 931 |
+
author={Zhai, C. and Chen, Z. and Li, J. and Xu, B.},
|
| 932 |
+
booktitle={CCPR},
|
| 933 |
+
year={2016},
|
| 934 |
+
pages={525-536}
|
| 935 |
+
}
|
| 936 |
+
|
| 937 |
+
@article{chen2021benchmarking,
|
| 938 |
+
title={Benchmarking Chinese Text Recognition: Datasets, Baselines, and an Empirical Study},
|
| 939 |
+
author={Chen, J. and Yu, H. and Ma, J. and Guan, M. and Xu, X. and Wang, X. and Qu, S. and Li, B. and Xue, X.},
|
| 940 |
+
primaryClass={cs.CV},
|
| 941 |
+
journal = {CoRR},
|
| 942 |
+
volume = {abs/2112.15093},
|
| 943 |
+
eprinttype = {arXiv},
|
| 944 |
+
year={2021}
|
| 945 |
+
}
|
| 946 |
+
|
| 947 |
+
@inproceedings{du2024smtr,
|
| 948 |
+
title={Out of Length Text Recognition with Sub-String Matching},
|
| 949 |
+
author={Y. Du and Z. Chen and C. Jia and X. Gao and Y.-G. Jiang},
|
| 950 |
+
year={2025},
|
| 951 |
+
booktitle={AAAI},
|
| 952 |
+
pages={2798-2806},
|
| 953 |
+
}
|
| 954 |
+
|
| 955 |
+
@article{chen2021text,
|
| 956 |
+
title={Text recognition in the wild: A survey},
|
| 957 |
+
author={Chen, X. and Jin, L. and Zhu, Y. and Luo, C. and Wang, T.},
|
| 958 |
+
journal = {{ACM} Comput. Surv.},
|
| 959 |
+
volume = {54},
|
| 960 |
+
number = {2},
|
| 961 |
+
pages = {42:1--42:35},
|
| 962 |
+
year = {2022},
|
| 963 |
+
}
|
| 964 |
+
|
| 965 |
+
@inproceedings{zheng2023tps++,
|
| 966 |
+
title={{TPS++}: Attention-Enhanced Thin-Plate Spline for Scene Text Recognition},
|
| 967 |
+
author={Zheng, T. and Chen, Z. and Bai, J. and Xie, H. and Jiang, Y.-G.},
|
| 968 |
+
booktitle={IJCAI},
|
| 969 |
+
pages={1777--1785},
|
| 970 |
+
year={2023}
|
| 971 |
+
}
|
| 972 |
+
|
| 973 |
+
@inproceedings{jiang2023revisiting,
|
| 974 |
+
title={Revisiting Scene Text Recognition: A Data Perspective},
|
| 975 |
+
author={Q. Jiang and J. Wang and D. Peng and C. Liu and L. Jin},
|
| 976 |
+
booktitle={ICCV},
|
| 977 |
+
pages={20486-20497},
|
| 978 |
+
year={2023},
|
| 979 |
+
}
|
| 980 |
+
|
| 981 |
+
@inproceedings{singh2021textocr,
|
| 982 |
+
title={{TextOCR}: Towards large-scale end-to-end reasoning for arbitrary-shaped scene text},
|
| 983 |
+
author={Singh, A. and Pang, G. and Toh, M. and Huang, J. and Galuba, W. and Hassner, T.},
|
| 984 |
+
booktitle={CVPR},
|
| 985 |
+
pages={8802--8812},
|
| 986 |
+
year={2021}
|
| 987 |
+
}
|
| 988 |
+
|
| 989 |
+
@inproceedings{chng2019icdar2019art,
|
| 990 |
+
title={{ICDAR2019} robust reading challenge on arbitrary-shaped text-rrc-art},
|
| 991 |
+
author={Chng, C. and Liu, Y. and Sun, Y. and Ng, C. and Luo, C. and Ni, Z. and Fang, C. and Zhang, S. and Han, J. and Ding, E. and others},
|
| 992 |
+
booktitle={ICDAR},
|
| 993 |
+
pages={1571--1576},
|
| 994 |
+
year={2019}
|
| 995 |
+
}
|
| 996 |
+
|
| 997 |
+
@inproceedings{sun2019icdarlsvt,
|
| 998 |
+
title={{ICDAR} 2019 competition on large-scale street view text with partial labeling-RRC-LSVT},
|
| 999 |
+
author={Sun, Y. and Ni, Z. and Chng, C. and Liu, Y. and Luo, C. and Ng, C. and Han, J. and Ding, E. and Liu, J. and Karatzas, D. and others},
|
| 1000 |
+
booktitle={ICDAR},
|
| 1001 |
+
pages={1557--1562},
|
| 1002 |
+
year={2019}
|
| 1003 |
+
}
|
| 1004 |
+
|
| 1005 |
+
@article{wu2024building,
|
| 1006 |
+
title={Building an Open-Vocabulary Video CLIP Model With Better Architectures, Optimization and Data},
|
| 1007 |
+
author={Wu, Zuxuan and Weng, Zejia and Peng, Wujian and Yang, Xitong and Li, Ang and Davis, Larry S and Jiang, Yu-Gang},
|
| 1008 |
+
journal={IEEE Trans. Pattern Anal. Mach. Intell.},
|
| 1009 |
+
year={2024}
|
| 1010 |
+
}
|
| 1011 |
+
|
| 1012 |
+
@inproceedings{zheng2023mrn,
|
| 1013 |
+
title={MRN: Multiplexed routing network for incremental multilingual text recognition},
|
| 1014 |
+
author={Zheng, T. and Chen, Z. and Huang, B. and Zhang, W. and Jiang, Y.-G.},
|
| 1015 |
+
booktitle={ICCV},
|
| 1016 |
+
pages={18644--18653},
|
| 1017 |
+
year={2023}
|
| 1018 |
+
}
|
| 1019 |
+
|
| 1020 |
+
@inproceedings{yang2022review,
|
| 1021 |
+
title={A review of natural scene text detection methods},
|
| 1022 |
+
author={Yang, Lingqian and Ergu, Daji and Cai, Ying and Liu, Fangyao and Ma, Bo},
|
| 1023 |
+
booktitle={ITQM},
|
| 1024 |
+
pages={1458--1465},
|
| 1025 |
+
year={2022},
|
| 1026 |
+
}
|
| 1027 |
+
|
| 1028 |
+
@article{yu2024turning,
|
| 1029 |
+
title={Turning a clip model into a scene text spotter},
|
| 1030 |
+
author={Yu, Wenwen and Liu, Yuliang and Zhu, Xingkui and Cao, Haoyu and Sun, Xing and Bai, Xiang},
|
| 1031 |
+
journal={IEEE Trans. Pattern Anal. Mach. Intell.},
|
| 1032 |
+
year={2024},
|
| 1033 |
+
publisher={IEEE}
|
| 1034 |
+
}
|
| 1035 |
+
|
| 1036 |
+
|
| 1037 |
+
@inproceedings{krylov2021openintelocr,
|
| 1038 |
+
title={Open images v5 text annotation and yet another mask text spotter},
|
| 1039 |
+
author={Krylov, I. and Nosov, S. and Sovrasov, V.},
|
| 1040 |
+
booktitle={Asian Conference on Machine Learning},
|
| 1041 |
+
pages={379--389},
|
| 1042 |
+
year={2021},
|
| 1043 |
+
organization={PMLR}
|
| 1044 |
+
}
|
| 1045 |
+
|
| 1046 |
+
@article{yuliang2017detectingctw1500,
|
| 1047 |
+
title={Detecting curve text in the wild: New dataset and new solution},
|
| 1048 |
+
author={Y. Liu and L. Jin and S. Zhang and S. Zhang},
|
| 1049 |
+
journal = {CoRR},
|
| 1050 |
+
volume = {abs/1712.02170},
|
| 1051 |
+
eprinttype = {arXiv},
|
| 1052 |
+
year={2017}
|
| 1053 |
+
}
|
| 1054 |
+
|
| 1055 |
+
@inproceedings{nagy2012neocr,
|
| 1056 |
+
title={{NEOCR}: A configurable dataset for natural image text recognition},
|
| 1057 |
+
author={Nagy, R. and Dicker, A. and Meyer-Wegener, K.},
|
| 1058 |
+
booktitle={Camera-Based Document Analysis and Recognition},
|
| 1059 |
+
pages={150--163},
|
| 1060 |
+
year={2012},
|
| 1061 |
+
}
|
| 1062 |
+
|
| 1063 |
+
@article{jung2011touchkaist,
|
| 1064 |
+
title={{Touch TT}: Scene text extractor using touchscreen interface},
|
| 1065 |
+
author={Jung, J. and Lee, S. and Cho, M. and Kim, J.},
|
| 1066 |
+
journal={ETRI},
|
| 1067 |
+
pages={78--88},
|
| 1068 |
+
year={2011},
|
| 1069 |
+
}
|
| 1070 |
+
|
| 1071 |
+
|
| 1072 |
+
@inproceedings{zhang2019icdarrects,
|
| 1073 |
+
title={{ICDAR} 2019 robust reading challenge on reading chinese text on signboard},
|
| 1074 |
+
author={Zhang, R. and Zhou, Y. and Jiang, Q. and Song, Q. and Li, N. and Zhou, K. and Wang, L. and Wang, D. and Liao, M. and Yang, M. and others},
|
| 1075 |
+
booktitle={ICDAR},
|
| 1076 |
+
pages={1577--1581},
|
| 1077 |
+
year={2019}
|
| 1078 |
+
}
|
| 1079 |
+
|
| 1080 |
+
@article{veit2016cocotext,
|
| 1081 |
+
title={Coco-text: Dataset and benchmark for text detection and recognition in natural images},
|
| 1082 |
+
author={Veit, A. and Matera, T. and Neumann, L. and Matas, J. and Belongie, S.},
|
| 1083 |
+
journal = {CoRR},
|
| 1084 |
+
volume = {abs/1601.07140},
|
| 1085 |
+
eprinttype = {arXiv},
|
| 1086 |
+
year={2016}
|
| 1087 |
+
}
|
| 1088 |
+
|
| 1089 |
+
@inproceedings{zhang2017ubertext,
|
| 1090 |
+
title={Uber-text: A large-scale dataset for optical character recognition from street-level imagery},
|
| 1091 |
+
author={Zhang, Y. and Gueguen, L. and Zharkov, I. and Zhang, P. and Seifert, K. and Kadlec, B.},
|
| 1092 |
+
booktitle={Scene Understanding Workshop-CVPR},
|
| 1093 |
+
pages={5},
|
| 1094 |
+
year={2017}
|
| 1095 |
+
}
|
| 1096 |
+
|
| 1097 |
+
@inproceedings{he2018icpr2018mtwi,
|
| 1098 |
+
title={{ICPR2018} contest on robust reading for multi-type web images},
|
| 1099 |
+
author={He, M. and Liu, Y. and Yang, Z. and Zhang, S. and Luo, C. and Gao, F. and Zheng, Q. and Wang, Y. and Zhang, X. and Jin, L.},
|
| 1100 |
+
booktitle={ICPR},
|
| 1101 |
+
pages={7--12},
|
| 1102 |
+
year={2018}
|
| 1103 |
+
}
|
| 1104 |
+
|
| 1105 |
+
@inproceedings{mathew2017benchmarkingiiitilst,
|
| 1106 |
+
title={Benchmarking scene text recognition in Devanagari, Telugu and Malayalam},
|
| 1107 |
+
author={Mathew, M. and Jain, M. and Jawahar, C.},
|
| 1108 |
+
booktitle={ICDAR},
|
| 1109 |
+
volume={7},
|
| 1110 |
+
pages={42--46},
|
| 1111 |
+
year={2017}
|
| 1112 |
+
}
|
| 1113 |
+
|
| 1114 |
+
|
| 1115 |
+
@inproceedings{nayef2019icdar2019mlt19,
|
| 1116 |
+
title={{ICDAR2019} robust reading challenge on multi-lingual scene text detection and recognition—RRC-MLT-2019},
|
| 1117 |
+
author={Nayef, N. and Patel, Y. and Busta, M. and Chowdhury, P. and Karatzas, D. and Khlif, W. and Matas, J. and Pal, U. and Burie, J. and Liu, C. and others},
|
| 1118 |
+
booktitle={ICDAR},
|
| 1119 |
+
pages={1582--1587},
|
| 1120 |
+
year={2019}
|
| 1121 |
+
}
|
| 1122 |
+
|
| 1123 |
+
@inproceedings{long2022towardshiertext,
|
| 1124 |
+
title={Towards end-to-end unified scene text detection and layout analysis},
|
| 1125 |
+
author={Long, S. and Qin, S. and Panteleev, D. and Bissacco, A. and Fujii, Y. and Raptis, M.},
|
| 1126 |
+
booktitle={CVPR},
|
| 1127 |
+
pages={1049--1059},
|
| 1128 |
+
year={2022}
|
| 1129 |
+
}
|
| 1130 |
+
|
| 1131 |
+
@article{arxiv2019_2dctc,
|
| 1132 |
+
author = {Z. Wan and
|
| 1133 |
+
F. Xie and
|
| 1134 |
+
Y. Liu and
|
| 1135 |
+
X. Bai and
|
| 1136 |
+
C. Yao},
|
| 1137 |
+
title = {2D-CTC for Scene Text Recognition},
|
| 1138 |
+
journal = {CoRR},
|
| 1139 |
+
volume = {abs/1907.09705},
|
| 1140 |
+
year = {2019},
|
| 1141 |
+
eprinttype = {arXiv},
|
| 1142 |
+
}
|
| 1143 |
+
|
| 1144 |
+
@inproceedings{Yang_2022_acmmm_dig,
|
| 1145 |
+
author = {Yang, M. and Liao, M. and Lu, P. and Wang, J. and Zhu, S. and Luo, H. and Tian, Q. and Bai, X.},
|
| 1146 |
+
title = {{Reading and Writing}: Discriminative and Generative Modeling for Self-Supervised Text Recognition},
|
| 1147 |
+
year = {2022},
|
| 1148 |
+
booktitle = {ACM MM},
|
| 1149 |
+
pages = {4214-4223},
|
| 1150 |
+
}
|
| 1151 |
+
|
| 1152 |
+
@article{du2023cppd,
|
| 1153 |
+
title={Context Perception Parallel Decoder for Scene Text Recognition},
|
| 1154 |
+
author={Y. Du and Z. Chen and C. Jia and X. Yin and C. Li and Y. Du and Y.-G. Jiang},
|
| 1155 |
+
year={2025},
|
| 1156 |
+
doi={10.1109/TPAMI.2025.3545453},
|
| 1157 |
+
journal = {IEEE Trans. Pattern Anal. Mach. Intell.},
|
| 1158 |
+
volume = {47},
|
| 1159 |
+
number = {6},
|
| 1160 |
+
pages = {4668--4683},
|
| 1161 |
+
}
|
| 1162 |
+
|
| 1163 |
+
@inproceedings{shi2017icdar2017rctw,
|
| 1164 |
+
title={{ICDAR2017} competition on reading chinese text in the wild (rctw-17)},
|
| 1165 |
+
author={Shi, B. and Yao, C. and Liao, M. and Yang, M and Xu, P and Cui, L and Belongie, S. and Lu, S. and Bai, X.},
|
| 1166 |
+
booktitle={ICDAR},
|
| 1167 |
+
pages={1429--1434},
|
| 1168 |
+
year={2017}
|
| 1169 |
+
}
|
| 1170 |
+
|
| 1171 |
+
@inproceedings{zhong_2024_acmmm_vlreader,
|
| 1172 |
+
author = {Zhong, H. and Yang, Z. and Li, Z. and Wang, P. and Tang, J. and Cheng, W. and Yao, C.},
|
| 1173 |
+
title = {{VL-Reader}: Vision and Language Reconstructor is an Effective Scene Text Recognizer},
|
| 1174 |
+
year = {2024},
|
| 1175 |
+
booktitle = {ACM MM},
|
| 1176 |
+
pages = {4207-4216},
|
| 1177 |
+
}
|
| 1178 |
+
|
| 1179 |
+
@InProceedings{Zhao_2024_CVPR_E2STR,
|
| 1180 |
+
author = {Zhao, Z. and Tang, J. and Lin, C. and Wu, B. and Huang, C. and Liu, H. and Tan, X. and Zhang, Z. and Xie, Y.},
|
| 1181 |
+
title = {Multi-modal In-Context Learning Makes an Ego-evolving Scene Text Recognizer},
|
| 1182 |
+
booktitle = {CVPR},
|
| 1183 |
+
year = {2024},
|
| 1184 |
+
pages = {15567-15576}
|
| 1185 |
+
}
|
| 1186 |
+
|
| 1187 |
+
@ARTICLE{zhao_2025_tip_clip4str,
|
| 1188 |
+
author={Zhao, S. and Quan, R. and Zhu, L. and Yang, Y.},
|
| 1189 |
+
journal={IEEE Trans. Image Process.},
|
| 1190 |
+
title={{CLIP4STR}: A Simple Baseline for Scene Text Recognition With Pre-Trained Vision-Language Model},
|
| 1191 |
+
year={2024},
|
| 1192 |
+
volume={33},
|
| 1193 |
+
pages={6893-6904}}
|
| 1194 |
+
|
| 1195 |
+
@inproceedings{zhao_2024_acmmm_dptr,
|
| 1196 |
+
author = {Zhao, S. and Du, Y. and Chen, Z. and Jiang, Y.-G.},
|
| 1197 |
+
title = {Decoder Pre-Training with only Text for Scene Text Recognition},
|
| 1198 |
+
year = {2024},
|
| 1199 |
+
isbn = {9798400706868},
|
| 1200 |
+
booktitle = {ACM MM},
|
| 1201 |
+
pages = {5191-5200},
|
| 1202 |
+
}
|
| 1203 |
+
|
| 1204 |
+
@inproceedings{HeCXLDG22_mae,
|
| 1205 |
+
author = {K. He and
|
| 1206 |
+
X. Chen and
|
| 1207 |
+
S. Xie and
|
| 1208 |
+
Y. Li and
|
| 1209 |
+
P. Doll{\'{a}}r and
|
| 1210 |
+
R. B. Girshick},
|
| 1211 |
+
title = {Masked Autoencoders Are Scalable Vision Learners},
|
| 1212 |
+
booktitle = {CVPR},
|
| 1213 |
+
pages = {15979--15988},
|
| 1214 |
+
year = {2022}
|
| 1215 |
+
}
|
| 1216 |
+
|
| 1217 |
+
@inproceedings{Bao0PW22_beit,
|
| 1218 |
+
author = {H. Bao and
|
| 1219 |
+
L. Dong and
|
| 1220 |
+
S. Piao and
|
| 1221 |
+
F. Wei},
|
| 1222 |
+
title = {{BEiT}: {BERT} Pre-Training of Image Transformers},
|
| 1223 |
+
booktitle = {ICLR},
|
| 1224 |
+
year = {2022}
|
| 1225 |
+
}
|
SVTRv2: CTC Beats Encoder-Decoder Models in Scene Text Recognition/main.tex
ADDED
|
@@ -0,0 +1,571 @@
| 1 |
+
% ICCV 2025 Paper Template
|
| 2 |
+
|
| 3 |
+
\documentclass[10pt,twocolumn,letterpaper]{article}
|
| 4 |
+
|
| 5 |
+
%%%%%%%%% PAPER TYPE - PLEASE UPDATE FOR FINAL VERSION
|
| 6 |
+
\usepackage{iccv} % To produce the CAMERA-READY version
|
| 7 |
+
% \usepackage[review]{iccv} % To produce the REVIEW version
|
| 8 |
+
% \usepackage[pagenumbers]{iccv} % To force page numbers, e.g. for an arXiv version
|
| 9 |
+
\usepackage{multirow}
|
| 10 |
+
\usepackage{colortbl}
|
| 11 |
+
% Import additional packages in the preamble file, before hyperref
|
| 12 |
+
\input{preamble}
|
| 13 |
+
% \usepackage[accsupp]{axessibility} % Improves PDF readability for those with disabilities.
|
| 14 |
+
% It is strongly recommended to use hyperref, especially for the review version.
|
| 15 |
+
% hyperref with option pagebackref eases the reviewers' job.
|
| 16 |
+
% Please disable hyperref *only* if you encounter grave issues,
|
| 17 |
+
% e.g. with the file validation for the camera-ready version.
|
| 18 |
+
%
|
| 19 |
+
% If you comment hyperref and then uncomment it, you should delete *.aux before re-running LaTeX.
|
| 20 |
+
% (Or just hit 'q' on the first LaTeX run, let it finish, and you should be clear).
|
| 21 |
+
|
| 22 |
+
% \usepackage{CJKutf8}
|
| 23 |
+
\usepackage[ruled,vlined]{algorithm2e}
|
| 24 |
+
\definecolor{commentcolor}{RGB}{110,154,155} % define comment color
|
| 25 |
+
\newcommand{\PyComment}[1]{\ttfamily\textcolor{commentcolor}{\# #1}} % add a "#" before the input text "#1"
|
| 26 |
+
\newcommand{\PyCode}[1]{\ttfamily\textcolor{black}{#1}} % \ttfamily is the code font
|
| 27 |
+
% \newcommand{\ch}[1]{\begin{CJK}{UTF8}{gbsn}{#1}\end{CJK}}
|
| 28 |
+
\usepackage{listings}
|
| 29 |
+
\lstset{
|
| 30 |
+
basicstyle=\ttfamily,
|
| 31 |
+
breaklines=true,
|
| 32 |
+
columns=fullflexible,
|
| 33 |
+
commentstyle=\color{gray}\textit,
|
| 34 |
+
frame=single,
|
| 35 |
+
language=Python,
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
\definecolor{iccvblue}{rgb}{0.21,0.49,0.74}
|
| 39 |
+
\usepackage[pagebackref,breaklinks,colorlinks,allcolors=iccvblue]{hyperref}
|
| 40 |
+
|
| 41 |
+
%%%%%%%%% PAPER ID - PLEASE UPDATE
|
| 42 |
+
\def\paperID{6403} % * Enter the Paper ID here
|
| 43 |
+
\def\confName{ICCV}
|
| 44 |
+
\def\confYear{2025}
|
| 45 |
+
|
| 46 |
+
%%%%%%%%% TITLE - PLEASE UPDATE
|
| 47 |
+
\title{SVTRv2: CTC Beats Encoder-Decoder Models in Scene Text Recognition}
|
| 48 |
+
|
| 49 |
+
%%%%%%%%% AUTHORS - PLEASE UPDATE
|
| 50 |
+
% \author{First Author\\
|
| 51 |
+
% Institution1\\
|
| 52 |
+
% Institution1 address\\
|
| 53 |
+
% {\tt\small firstauthor@i1.org}
|
| 54 |
+
% % For a paper whose authors are all at the same institution,
|
| 55 |
+
% % omit the following lines up until the closing ``}''.
|
| 56 |
+
% % Additional authors and addresses can be added with ``\and'',
|
| 57 |
+
% % just like the second author.
|
| 58 |
+
% % To save space, use either the email address or home page, not both
|
| 59 |
+
% % \and
|
| 60 |
+
% % Second Author\\
|
| 61 |
+
% % Institution2\\
|
| 62 |
+
% % First line of institution2 address\\
|
| 63 |
+
% % {\tt\small secondauthor@i2.org}
|
| 64 |
+
% }
|
| 65 |
+
|
| 66 |
+
\author{
|
| 67 |
+
Yongkun Du$^{1}$, Zhineng Chen$^{1}\thanks{Corresponding Author}$, Hongtao Xie$^{2}$, Caiyan Jia$^{3}$, Yu-Gang Jiang$^{1}$ \\
|
| 68 |
+
% \affiliations
|
| 69 |
+
%Shanghai Collaborative Innovation Center of Intelligent Visual Computing,
|
| 70 |
+
%$^1$College of Computer Science and Artificial Intelligence, Fudan University, China\\
|
| 71 |
+
$^1$Institute of Trustworthy Embodied AI, Fudan University, China\\
|
| 72 |
+
%and Beijing Key Lab of Traffic Data Analysis and Mining
|
| 73 |
+
$^2$School of Information Science and Technology, USTC, China \\
|
| 74 |
+
$^3$School of Computer Science and Technology, Beijing Jiaotong University, China\\
|
| 75 |
+
% \emails
|
| 76 |
+
% $^{*}$Corresponding author: zhinchen@fudan.edu.cn
|
| 77 |
+
% }
|
| 78 |
+
{\tt\small
|
| 79 |
+
ykdu23@m.fudan.edu.cn,
|
| 80 |
+
\{zhinchen, ygj\}@fudan.edu.cn,
|
| 81 |
+
htxie@ustc.edu.cn,
|
| 82 |
+
cyjia@bjtu.edu.cn}
|
| 83 |
+
}
|
| 84 |
+
|
| 85 |
+
\begin{document}
|
| 86 |
+
\maketitle
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
\begin{abstract}
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
Connectionist temporal classification (CTC)-based scene text recognition (STR) methods, e.g., SVTR, are widely employed in OCR applications, mainly due to their simple architecture, which contains only a visual model and a CTC-aligned linear classifier, and therefore fast inference. However, they generally exhibit lower accuracy than encoder-decoder-based methods (EDTRs), as they struggle with text irregularity and lack linguistic context modeling. To address these challenges, we propose SVTRv2, a CTC model endowed with the ability to handle text irregularities and model linguistic context. First, a multi-size resizing strategy is proposed to resize text instances to appropriate predefined sizes, effectively avoiding severe text distortion. Meanwhile, we introduce a feature rearrangement module to ensure that visual features accommodate the requirement of CTC alignment, thus alleviating the alignment puzzle. Second, we propose a semantic guidance module. It integrates linguistic context into the visual features, allowing the CTC model to leverage language information for accuracy improvement. This module can be omitted at the inference stage and does not increase the time cost. We extensively evaluate SVTRv2 on both standard and recent challenging benchmarks, where SVTRv2 is fairly compared to popular STR models across multiple scenarios, including different types of text irregularity, languages, long text, and settings with and without pretraining. SVTRv2 surpasses most EDTRs across these scenarios in terms of both accuracy and inference speed. Code: \url{https://github.com/Topdu/OpenOCR}.
|
| 93 |
+
|
| 94 |
+
\end{abstract}
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
\section{Introduction}
|
| 98 |
+
\label{sec:intro}
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
\begin{figure}
|
| 103 |
+
\centering
|
| 104 |
+
\includegraphics[width=0.48\textwidth]{SVTRv2_fig1.pdf}
|
| 105 |
+
\caption{\textbf{Top}: comparison with previous methods~\cite{duijcai2022svtr,du2023cppd,BautistaA22PARSeq,jiang2023revisiting,iccv2023lister}, each the best in a single scenario, where long text recognition accuracy (Long) and FPS are normalized. Our SVTRv2 achieves a new state of the art in every scenario except FPS. Nevertheless, SVTRv2 is still faster than all the EDTRs. \textbf{Bottom}: challenges caused by text irregularity and missing linguistic context.}
|
| 106 |
+
\label{fig:fig1}
|
| 107 |
+
\end{figure}
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
As a task of extracting text from natural images, scene text recognition (STR) has garnered considerable interest over decades. Unlike text from scanned documents, scene text often exists within complex natural scenarios, posing challenges such as background noise, text distortions, irregular layouts, artistic fonts~\cite{ChenJZLW21str_survey}, etc. To tackle these challenges, a variety of STR methods have been developed and they can be roughly divided into two categories, i.e., connectionist temporal classification (CTC)-based methods and encoder-decoder-based methods (EDTRs).
|
| 112 |
+
|
| 113 |
+
Typically, CTC-based methods~\cite{shi2017crnn,duijcai2022svtr,hu2020gtc,ppocrv3} employ a single visual model to extract image features and then apply a CTC-aligned linear classifier~\cite{CTC} to predict recognition results. This straightforward architecture provides advantages such as fast inference, which makes them especially popular in OCR applications. However, these models struggle to handle text irregularity, i.e., text distortions, varying layouts, etc. As a consequence, attention-based decoders are introduced as alternatives, leading to a series of EDTRs~\cite{shi2019aster,Sheng2019nrtr,pr2019MORAN,li2019sar,wang2020aaai_dan,yu2020srn,cvpr2020seed,zhang2020autostr,yue2020robustscanner,fang2021abinet,Wang_2021_visionlan,wang2022tip_PETR,BautistaA22PARSeq,mgpstr,du2023cppd,ijcai2023LPV,iccv2023lister,zheng2024cdistnet,TPAMI2022ABINetPP,yang2024class_cam,Wei_2024_busnet,Xu_2024_CVPR_OTE,levocr,xie2022toward_cornertrans,Guan_2023_CVPR_SIGA,Guan_2023_ICCV_CCD,zhou2024cff,Zhao_2024_CVPR_E2STR,zhong_2024_acmmm_vlreader,zhao_2025_tip_clip4str,zhao_2024_acmmm_dptr}. These methods exhibit superior performance in complex scenarios by leveraging multi-modal cues, including visual~\cite{Wang_2021_visionlan,ijcai2023LPV,du2023cppd,Xu_2024_CVPR_OTE}, linguistic~\cite{Sheng2019nrtr,yu2020srn,fang2021abinet,cvpr2020seed}, and positional~\cite{yue2020robustscanner,zheng2024cdistnet,iccv2023lister} ones, which are largely missed in current CTC models. As depicted in the top of Fig.~\ref{fig:fig1}, compared to SVTR~\cite{duijcai2022svtr}, a leading CTC model adopted by famous commercial OCR engines~\cite{ppocrv3}, EDTRs achieve superior results in scenarios \cite{Wang_2021_visionlan,jiang2023revisiting,chen2021benchmarking} such as curved, multi-oriented, artistic, occluded, and Chinese text.
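For clarity, the inference path of such a CTC-based recognizer can be sketched in a few lines of PyTorch-style Python. This is only an illustrative skeleton under our own naming (\texttt{CTCRecognizer}, \texttt{backbone}, and the assumed $(B, T, D)$ feature shape are not taken from any cited implementation):
\begin{lstlisting}
import torch.nn as nn

class CTCRecognizer(nn.Module):
    # A visual model followed by a CTC-aligned linear classifier.
    def __init__(self, backbone, feat_dim, num_classes):
        super().__init__()
        self.backbone = backbone                    # -> (B, T, feat_dim)
        self.classifier = nn.Linear(feat_dim, num_classes + 1)  # +1 blank

    def forward(self, images):
        feats = self.backbone(images)               # (B, T, D)
        return self.classifier(feats).log_softmax(-1)  # per-frame log-probs
\end{lstlisting}
During training, the per-frame predictions are aligned with the label sequence via \texttt{nn.CTCLoss}; at inference, a simple greedy decoding over frames suffices, which is why such models are fast.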
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
The inferior accuracy of CTC models can be attributed to two primary factors. First, these models struggle with irregular text, as CTC alignment presumes that the text appears in a near canonical left-to-right order~\cite{ChenJZLW21str_survey,whatwrong}, which is not always true, particularly in complex scenarios. Second, CTC models seldom encode linguistic information, which is typically accomplished by the decoder of EDTRs. While recent advancements deal with the two issues by employing text rectification~\cite{shi2019aster,pr2019MORAN,zheng2023tps++}, developing 2D CTC~\cite{arxiv2019_2dctc}, utilizing masked image modeling~\cite{Wang_2021_visionlan,ijcai2023LPV}, etc., the accuracy gap between CTC and EDTRs remains significant, indicating that novel solutions still need to be investigated.
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
In this paper, our aim is to build more powerful CTC models by better handling text irregularity and integrating linguistic context. For the former, we address this challenge by first extracting discriminative features and then better aligning them. First, existing methods uniformly resize text images with various shapes to a fixed size before feeding them into the visual model. We question the rationality of this resizing, which easily causes unnecessary text distortion, making the text difficult to read, as shown in the bottom-left of Fig.~\ref{fig:fig1}. To this end, a multi-size resizing (MSR) strategy is proposed to resize the text instance to a proper predefined size based on its aspect ratio, thus minimizing text distortion and ensuring the discrimination of the extracted visual features. Second, irregular text may be rotated significantly, and the character arrangement does not align with the reading order of the text, causing the CTC alignment puzzle, as shown in the bottom-center example in Fig.~\ref{fig:fig1}. To solve this, we introduce a feature rearrangement module (FRM). It rearranges visual features, first horizontally and then vertically, to identify and prioritize relevant features. FRM maps 2D visual features into a sequence aligned with the text's reading order, thus effectively alleviating the alignment puzzle. Consequently, CTC models integrating MSR and FRM can recognize irregular text well, without using rectification modules or attention-based decoders.
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
As for the latter, the mistakenly recognized example shown in the bottom-right of Fig.~\ref{fig:fig1} clearly highlights the necessity of integrating linguistic information. Since CTC models directly classify visual features, we have to endow the visual model itself with linguistic context modeling capability, which has been less discussed previously. Inspired by guided training of CTC (GTC)~\cite{hu2020gtc,ppocrv3} and string matching-based recognition~\cite{du2024smtr}, we propose a semantic guidance module (SGM), a new scheme that solely leverages the surrounding string context to model the target character. This approach effectively guides the visual model in capturing linguistic context. During inference, SGM can be omitted and thus does not increase the time cost.
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
With these contributions, we develop SVTRv2, a novel CTC-based method whose recognition ability is largely enhanced, while still maintaining a simple inference architecture and fast speed. To thoroughly validate SVTRv2, we conduct extensive comparative experiments on benchmarks including standard regular and irregular text~\cite{whatwrong}, Union14M-Benchmark~\cite{jiang2023revisiting}, occluded scene text~\cite{Wang_2021_visionlan}, long text~\cite{du2024smtr}, and Chinese text~\cite{chen2021benchmarking}. The results demonstrate that SVTRv2 consistently outperforms all the compared EDTRs across the evaluated scenarios in terms of accuracy and speed. Moreover, a simple pretraining of SVTRv2 yields highly competitive accuracy compared to recent pretraining-based EDTRs \cite{Zhao_2024_CVPR_E2STR,zhong_2024_acmmm_vlreader,zhao_2025_tip_clip4str,zhao_2024_acmmm_dptr}, highlighting its effectiveness and broad applicability.
|
| 134 |
+
|
| 135 |
+
In addition, recent advances~\cite{jiang2023revisiting,Rang_2024_CVPR_clip4str} have indicated the importance of large-scale real-world datasets in improving STR performance. However, many STR models are primarily trained on synthetic data~\cite{Synthetic,jaderberg14synthetic}, which fails to fully represent real-world complexities and leads to performance limitations, particularly in challenging scenarios. Meanwhile, we observe that existing large-scale real-world training datasets~\cite{BautistaA22PARSeq,jiang2023revisiting,Rang_2024_CVPR_clip4str} overlap with Union14M-Benchmark, causing a small overlap between training and test data, thus the results reported in~\cite{jiang2023revisiting} should be updated. As a result, we introduce \textit{U14M-Filter}, a rigorously filtered version of the real-world training dataset \textit{Union14M-L}~\cite{jiang2023revisiting}. We then systematically reproduce and retrain 24 mainstream STR methods from scratch on \textit{U14M-Filter} and thoroughly evaluate them on Union14M-Benchmark. Their accuracy, model size, and inference time constitute a comprehensive and reliable new benchmark for future reference.
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
\begin{figure*}[t]
|
| 141 |
+
\centering
|
| 142 |
+
\includegraphics[width=0.98\textwidth]{SVTRv2_overview.pdf}
|
| 143 |
+
\caption{An illustrative overview of SVTRv2. The text image is first resized according to multi-size resizing (MSR) and then undergoes feature extraction. During training, both the semantic guidance module (SGM) and the feature rearrangement module (FRM) are employed, which are responsible for linguistic context modeling and CTC-oriented feature rearrangement, respectively. Only FRM is retained during inference.}
|
| 144 |
+
\label{fig:svtrv2_overview}
|
| 145 |
+
\end{figure*}
|
| 146 |
+
|
| 147 |
+
\section{Related Work}
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
\noindent\textbf{Irregular text recognition}~\cite{Risnumawan2014cute,SVTP,jiang2023revisiting} has posed a significant challenge in STR due to the diverse variation of text instances, where CTC-based methods~\cite{shi2017crnn,duijcai2022svtr,hu2020gtc,ppocrv3} are often less effective. To address this, some methods~\cite{shi2019aster,cvpr2020seed,zhang2020autostr,zheng2024cdistnet,yang2024class_cam,duijcai2022svtr,zheng2023tps++} incorporate rectification modules~\cite{shi2019aster,pr2019MORAN,zheng2023tps++} that aim to transform irregular text into a more regular format. More methods utilize attention-based decoders~\cite{wang2020aaai_dan,li2019sar,Sheng2019nrtr,yue2020robustscanner,du2023cppd,du2024smtr}, which employ the attention mechanism to dynamically localize characters regardless of text layout and are thus less affected.
|
| 151 |
+
However, these methods generally rely on tailored training hyper-parameters. For example, rectification modules~\cite{shi2019aster,pr2019MORAN,zheng2023tps++} typically specify a fixed output image size (e.g., 32$\times$128), which is not always a suitable choice, while attention-based decoders~\cite{wang2020aaai_dan,li2019sar,Sheng2019nrtr,yue2020robustscanner,du2023cppd,du2024smtr} generally set the maximum recognition length to 25 characters, so longer text cannot be recognized, as shown in Fig.~\ref{fig:case_long}.
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
\noindent\textbf{Linguistic context modeling}. There are several ways of modeling linguistic context. One major branch is auto-regressive (AR)-based STR methods~\cite{shi2019aster,wang2020aaai_dan,Sheng2019nrtr,li2019sar,jiang2023revisiting,xie2022toward_cornertrans,zheng2024cdistnet,Xu_2024_CVPR_OTE,yang2024class_cam,zhou2024cff,du2024igtr}, which utilize previously decoded characters to model contextual cues. However, their inference speed is slow due to their character-by-character decoding nature. Some other methods~\cite{yu2020srn,fang2021abinet,MATRN,BautistaA22PARSeq} integrate external language models to model linguistic context and correct the recognition results. While effective, the linguistic context is purely text-based, making it challenging to adapt these models to the visual model of CTC methods. Some studies~\cite{cvpr2020seed,Wang_2021_visionlan,ijcai2023LPV} also model linguistic context with visual information alone, using pretraining based on masked image modeling~\cite{HeCXLDG22_mae,Bao0PW22_beit}. However, they still depend on attention-based decoders to utilize linguistic information, rather than integrating linguistic cues into the visual model, which limits their effectiveness in enhancing CTC models.
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
\section{Method}
|
| 160 |
+
|
| 161 |
+
Fig.~\ref{fig:svtrv2_overview} presents an overview of SVTRv2. A text image is first resized by MSR according to its aspect ratio, forming the input \( \mathbf{X} \in \mathbb{R}^{3 \times H \times W} \), which then passes through three consecutive feature extraction stages, yielding visual features $\mathbf{F}\in \mathbb{R}^{\frac{H}{8} \times \frac{W}{4} \times D_2}$. During training, $\mathbf{F}$ is fed into both SGM and FRM. SGM guides SVTRv2 to model linguistic context, while FRM rearranges $\mathbf{F}$ into the character feature sequence $\mathbf{\tilde{F}} \in \mathbb{R}^{\frac{W}{4} \times D_2}$, which is synchronized with the text reading order and aligns with the label sequence. During inference, SGM is discarded for efficiency.
|
| 162 |
+
%\textbf{\textit{X}} then passes sequentially through three stages
|
| 163 |
+
|
| 164 |
+
\subsection{Multi-Size Resizing (MSR)}
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
Previous works typically resize irregular text images to a fixed size, such as $32 \times 128$, which may cause undesired text distortion and severely affect the quality of the extracted visual features. To address this issue, we propose a simple yet effective multi-size resizing (MSR) strategy that resizes text images based on their aspect ratio ($R=\frac{W}{H}$). Specifically, we define four sizes: [64, 64], [48, 96], [40, 112], and [32, $\lfloor R\rfloor \times$ 32], respectively corresponding to the aspect ratio ranges $R<$ 1.5 ($R_1$), 1.5 $\leq R <$ 2.5 ($R_2$), 2.5 $\leq R <$ 3.5 ($R_3$), and $R\geq$ 3.5 ($R_4$). Note that the first three buckets are fixed, thus text instances in the same bucket can be trained in a batch, while the fourth one can handle long text without introducing significant distortion.
Therefore, MSR adaptively resizes text instances while roughly maintaining their aspect ratios, and significant text distortion caused by resizing is almost eliminated.
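The bucket assignment of MSR is straightforward; a minimal Python sketch is given below (the function name and the use of OpenCV for resizing are our own choices for illustration):
\begin{lstlisting}
import math
import cv2  # assumed here only for resizing

def msr_resize(img):
    h, w = img.shape[:2]
    r = w / h
    if r < 1.5:               # R1
        th, tw = 64, 64
    elif r < 2.5:             # R2
        th, tw = 48, 96
    elif r < 3.5:             # R3
        th, tw = 40, 112
    else:                     # R4: long text keeps a proportional width
        th, tw = 32, math.floor(r) * 32
    return cv2.resize(img, (tw, th))  # cv2.resize expects (W, H)
\end{lstlisting}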
|
| 170 |
+
|
| 171 |
+
\subsection{Visual Feature Extraction}
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
Motivated by SVTR~\cite{duijcai2022svtr}, the visual model of SVTRv2 comprises three stages, with \( \text{stage}_i \) containing \( N_i \) mixing blocks, as illustrated in Fig.~\ref{fig:svtrv2_overview}. To extract discriminative visual features, we devise two types of mixing blocks: local and global. Unlike SVTR, to be able to handle multiple input sizes, we do not use absolute positional encoding. Instead, to model positional information, we implement local mixing as two consecutive grouped convolutions, as an alternative to the window attention of \cite{duijcai2022svtr}, effectively capturing local character features such as edges, textures, and strokes. Meanwhile, global mixing is realized by the multi-head self-attention (MHSA) mechanism~\cite{NIPS2017_attn}, which performs global contextual modeling on the features, thereby enhancing the model's comprehension of inter-character relationships and the overall text image. Both the number of groups in the grouped convolutions and the number of heads in MHSA are set to \( \frac{D_i}{32} \). Similar to SVTR~\cite{duijcai2022svtr}, by adjusting the hyper-parameters $N_i$ and $D_i$, we derive three variants of SVTRv2 with different capacities, i.e., Tiny (T), Small (S), and Base (B), which are detailed in \textit{Suppl. Sec.}~7.
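The two mixing blocks can be sketched in PyTorch-style Python as follows. This is a schematic under our own assumptions (kernel size 3, no normalization or residual connections shown), not the exact released implementation:
\begin{lstlisting}
import torch.nn as nn

class LocalMixing(nn.Module):
    # Two consecutive grouped convolutions with groups = D/32.
    def __init__(self, dim):
        super().__init__()
        g = dim // 32
        self.conv1 = nn.Conv2d(dim, dim, 3, padding=1, groups=g)
        self.conv2 = nn.Conv2d(dim, dim, 3, padding=1, groups=g)
        self.act = nn.GELU()

    def forward(self, x):                    # x: (B, D, H, W)
        return self.conv2(self.act(self.conv1(x)))

class GlobalMixing(nn.Module):
    # Multi-head self-attention over all spatial positions, heads = D/32.
    def __init__(self, dim):
        super().__init__()
        self.attn = nn.MultiheadAttention(dim, num_heads=dim // 32,
                                          batch_first=True)

    def forward(self, x):                    # x: (B, D, H, W)
        b, d, h, w = x.shape
        seq = x.flatten(2).transpose(1, 2)   # (B, H*W, D)
        out, _ = self.attn(seq, seq, seq)
        return out.transpose(1, 2).reshape(b, d, h, w)
\end{lstlisting}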
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
\subsection{Feature Rearrangement Module (FRM)}
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
To address the alignment puzzle caused by text irregularities like rotated text, we propose a feature rearrangement module (FRM). This module rearranges the visual features \( \mathbf{F} \in \mathbb{R}^{(\frac{H}{8} \times \frac{W}{4}) \times D_2} \) into a sequence \( \mathbf{\tilde{F}} \in \mathbb{R}^{\frac{W}{4} \times D_2} \) that conforms to the CTC alignment requirement. We model the rearrangement process as a soft mapping using a probability matrix \( \mathbf{M} \in \mathbb{R}^{\frac{W}{4} \times (\frac{H}{8} \times \frac{W}{4})} \), where each element \( \mathbf{M}_{m,i,j} \) represents the probability of mapping the original feature \( \mathbf{F}_{i,j} \) to the rearranged feature \( \mathbf{\tilde{F}}_m \), where \( i \in \{1, 2, \ldots, \frac{H}{8}\} \) and \( j, m \in \{1, 2, \ldots, \frac{W}{4}\} \). Consequently, the soft rearrangement process is formalized as \( \mathbf{\tilde{F}} = \mathbf{M} \mathbf{F} \).
|
| 185 |
+
|
| 186 |
+
FRM is responsible for learning the matrix \( \mathbf{M} \). To ensure that the module is sensitive to text orientation, we decompose the learning process of \( \mathbf{M} \) into two sequential steps: horizontal rearranging and vertical rearranging. As illustrated in Fig.~\ref{fig:svtrv2_overview}, the horizontal one processes each row of the visual feature \( \mathbf{F} \), denoted as \( \mathbf{F}_i \in \mathbb{R}^{\frac{W}{4} \times D_2} \), to learn a horizontal rearrangement matrix \( \mathbf{M}^h_i \in \mathbb{R}^{\frac{W}{4} \times \frac{W}{4}} \), which rearranges visual features along the horizontal direction. We implement this process using a multi-head self-attention mechanism as follows:
|
| 187 |
+
\begin{gather}
|
| 188 |
+
\label{eq:h_matrix}
|
| 189 |
+
\mathbf{M}^h_i = \sigma\left(\mathbf{F}_i\mathbf{W}^q_i\left(\mathbf{F}_i\mathbf{W}^k_i\right)^t\right) \\ \notag
|
| 190 |
+
\mathbf{F}^{h'}_i = \text{LN}(\mathbf{M}^h_i\mathbf{F}_i\mathbf{W}^v_i+\mathbf{F}_i), \mathbf{F}^h_i = \text{LN}(\text{MLP}(\mathbf{F}^{h'}_i)+\mathbf{F}^{h'}_i)
|
| 191 |
+
\end{gather}
|
| 192 |
+
where \( \mathbf{W}^q_i, \mathbf{W}^k_i, \mathbf{W}^v_i \in \mathbb{R}^{D_2 \times D_2} \) are learnable weights, and \( \sigma \) and \( ()^t \) denote the softmax function and the matrix transpose operation, respectively. LN and MLP denote Layer Normalization and a Multi-Layer Perceptron with an expansion rate of 4, respectively. \( \mathbf{F}^h = \{\mathbf{F}^h_1,\mathbf{F}^h_2,\ldots,\mathbf{F}^h_{\frac{H}{8}}\} \) represents the horizontally rearranged visual features.
|
| 193 |
+
|
| 194 |
+
Similarly, the vertical rearrangement processes visual features column-wise. Unlike the horizontal step, we introduce a selecting token \( \mathbf{T}^s \), which interacts with all column features via cross-attention to learn a vertical rearrangement matrix \( \mathbf{M}^v \). The elements of \( \mathbf{M}^v \) represent the probability of mapping column features to the rearranged features, yielding \( \mathbf{\tilde{F}} \) as follows:
|
| 195 |
+
\begin{equation}
|
| 196 |
+
\label{eq:v_matrix}
|
| 197 |
+
\mathbf{M}^v_j = \sigma\left(\mathbf{T}^s\left(\mathbf{F}^h_{:,j}\mathbf{W}^k_j\right)^t\right),~ \mathbf{F}^v_j = \mathbf{M}^v_j\mathbf{F}^h_{:,j}\mathbf{W}^v_j
|
| 198 |
+
\end{equation}
|
| 199 |
+
where \( \mathbf{W}^k_j, \mathbf{W}^v_j \in \mathbb{R}^{D_2 \times D_2} \) are learnable weights.
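The following single-head sketch illustrates the vertical step: a selecting token attends over each column of \( \mathbf{F}^h \) and collapses it into one feature per horizontal position. It is illustrative only, and the tensor layout is our own choice.
\begin{verbatim}
# Sketch of vertical rearranging with a learnable selecting token T^s.
import torch
import torch.nn as nn

class VerticalRearrange(nn.Module):
    def __init__(self, dim):
        super().__init__()
        self.t_s = nn.Parameter(torch.randn(1, dim))   # selecting token T^s
        self.wk = nn.Linear(dim, dim, bias=False)
        self.wv = nn.Linear(dim, dim, bias=False)

    def forward(self, f_h):                 # f_h: (H/8, W/4, D2)
        k, v = self.wk(f_h), self.wv(f_h)
        # M^v_j: probabilities of T^s over the rows of column j
        m_v = torch.softmax(torch.einsum('d,hwd->wh', self.t_s[0], k),
                            dim=-1)         # (W/4, H/8)
        # F^v_j = M^v_j F^h_{:,j} W^v_j  -> final sequence F_tilde
        return torch.einsum('wh,hwd->wd', m_v, v)      # (W/4, D2)

f_h = torch.randn(8, 32, 256)
print(VerticalRearrange(256)(f_h).shape)    # torch.Size([32, 256])
\end{verbatim}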
|
| 200 |
+
|
| 201 |
+
We denote \( \mathbf{F}^v = \{\mathbf{F}^v_1,\mathbf{F}^v_2,\ldots,\mathbf{F}^v_{\frac{W}{4}}\} \in \mathbb{R}^{\frac{W}{4} \times D_2} \) as the final rearranged feature sequence \( \mathbf{\tilde{F}} \). The predicted character sequence \( \mathbf{\tilde{Y}}_{ctc} \in \mathbb{R}^{\frac{W}{4} \times N_c} \) is then obtained by passing \( \mathbf{\tilde{F}} \) through the classifier \(\mathbf{\tilde{Y}}_{ctc} = \mathbf{\tilde{F}} \mathbf{W}^{ctc}, \)
|
| 202 |
+
where \( \mathbf{W}^{ctc} \in \mathbb{R}^{D_2 \times N_c} \) is the learnable weight matrix. The predicted sequence is then aligned with the ground truth sequence \( \mathbf{Y} \) using the CTC alignment rule.
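As a concrete illustration of the CTC head at inference, the sketch below applies the linear classifier to the rearranged sequence and performs standard greedy (best-path) decoding. The class layout (blank at index 0, $N_c=95$) is an assumption for the example.
\begin{verbatim}
# Sketch of the CTC head: linear classifier + greedy decoding.
import torch
import torch.nn as nn

D2, N_c = 256, 95                        # 94 characters + 1 blank (assumed)
classifier = nn.Linear(D2, N_c, bias=False)   # W^ctc

f_tilde = torch.randn(32, D2)            # rearranged features, length W/4
logits = classifier(f_tilde)             # (32, N_c)

def ctc_greedy_decode(logits, blank=0):
    """Collapse repeats, then drop blanks (best-path CTC decoding)."""
    ids = logits.argmax(dim=-1).tolist()
    out, prev = [], blank
    for i in ids:
        if i != prev and i != blank:
            out.append(i)
        prev = i
    return out

print(ctc_greedy_decode(logits))
\end{verbatim}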
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
\subsection{Semantic Guidance Module (SGM)}
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
\begin{figure}[t]
|
| 214 |
+
\centering
|
| 215 |
+
\includegraphics[width=0.48\textwidth]{SVTRv2_smtr.pdf}
|
| 216 |
+
\caption{Visualization of attention maps when recognizing the target character by string matching on both sides, where $l_s$ is set to 5. [P] denotes the padding symbol.}
|
| 217 |
+
\label{fig:smtr}
|
| 218 |
+
\end{figure}
|
| 219 |
+
|
| 220 |
+
CTC models classify visual features directly to obtain recognition results. This scheme inherently requires that linguistic context be incorporated into the visual features themselves; only then can CTC benefit from it. In light of this, we propose a semantic guidance module (SGM) as follows.
|
| 221 |
+
|
| 222 |
+
For a text image with character labels \( \mathbf{Y}\) = \{\(c_1\), \(c_2\), \(\dots\), \(c_L\)\}, where \(c_i \) is the $i$-th character, we define its contextual information as the surrounding left string \( \mathbf{S}^l_i \) = \( \{c_{i-l_s}, \dots, c_{i-1}\} \) and right string \( \mathbf{S}^r_i\) = \(\{c_{i+1}, \dots, c_{i+l_s}\} \), where \( l_s \) denotes the size of the context window. SGM's role is to guide the visual model to integrate context from both \( \mathbf{S}^l_i \) and \( \mathbf{S}^r_i \) into visual features.
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
We describe the process using the left string \( \mathbf{S}^l_i \). First, the characters in \( \mathbf{S}^l_i \) are mapped to string embeddings \( \mathbf{E}^{l}_i \in \mathbb{R}^{l_s \times D_2} \). Then, these embeddings are encoded to create a hidden representation \( \mathbf{Q}^l_i \in \mathbb{R}^{1 \times D_2} \), representing the context of the left-side string \( \mathbf{S}^l_i \). In the following, the attention map \( \mathbf{A}^l_i \) is computed by applying a dot product between the hidden representation \( \mathbf{Q}^l_i \) and the visual features \( \mathbf{F} \), transformed by learned weight matrices \( \mathbf{W}^q \) and \( \mathbf{W}^k \). The formulation is as follows:
|
| 226 |
+
\begin{gather}
|
| 227 |
+
\mathbf{Q}^l_i = \text{LN}\left(\sigma\left(\mathbf{T}^l \mathbf{W}^q \left(\mathbf{E}^{l}_i \mathbf{W}^k \right)^t\right)\mathbf{E}^{l}_i \mathbf{W}^v + \mathbf{T}^l\right) \\ \notag
|
| 228 |
+
\mathbf{A}^l_i = \sigma\left(\mathbf{Q}^l_i \mathbf{W}^q \left(\mathbf{F} \mathbf{W}^k\right)^t \right),~ \mathbf{F}^l_i = \mathbf{A}^l_i \mathbf{F} \mathbf{W}^v
|
| 229 |
+
\end{gather}
|
| 230 |
+
where \( \mathbf{T}^l \in \mathbb{R}^{1 \times D_2} \) represents a predefined token encoding the left-side string. The attention map \( \mathbf{A}^l_i \) is used to weight the visual features \( \mathbf{F} \), producing a feature \( \mathbf{F}^l_i \in \mathbb{R}^{1 \times D_2} \) corresponding to character \( c_i \). After passing through the classifier $\mathbf{\tilde{Y}}^l_i=\mathbf{F}^l_i \mathbf{W}^{sgm}$, the predicted class probabilities \( \mathbf{\tilde{Y}}^l_i \in \mathbb{R}^{1 \times N_c} \) for \( c_i \) are obtained and used to compute the cross-entropy loss, where $\mathbf{W}^{sgm} \in \mathbb{R}^{D_2 \times N_c}$ is a learnable weight matrix.
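The following PyTorch sketch traces the left-side branch of SGM. It is a deliberate simplification: single-head attention, the two attention steps sharing one set of projection weights, and example shapes; the actual module may use separate multi-head projections.
\begin{verbatim}
# Single-head sketch of SGM for the left-side string S^l_i (training only).
import torch
import torch.nn as nn

class SGMLeft(nn.Module):
    def __init__(self, dim, n_cls):
        super().__init__()
        self.t_l = nn.Parameter(torch.randn(1, dim))   # left-context token T^l
        self.wq = nn.Linear(dim, dim, bias=False)
        self.wk = nn.Linear(dim, dim, bias=False)
        self.wv = nn.Linear(dim, dim, bias=False)
        self.ln = nn.LayerNorm(dim)
        self.cls = nn.Linear(dim, n_cls, bias=False)   # W^sgm

    def forward(self, e_l, feat):   # e_l: (l_s, D2), feat: (H/8*W/4, D2)
        # 1) summarize the left string into Q^l_i
        attn = torch.softmax(self.wq(self.t_l) @ self.wk(e_l).t(), dim=-1)
        q_l = self.ln(attn @ self.wv(e_l) + self.t_l)              # (1, D2)
        # 2) attend over visual features to locate the target character c_i
        a_l = torch.softmax(self.wq(q_l) @ self.wk(feat).t(), dim=-1)
        f_l = a_l @ self.wv(feat)                                  # (1, D2)
        return self.cls(f_l)                                       # (1, N_c)

sgm = SGMLeft(256, 94)
print(sgm(torch.randn(5, 256), torch.randn(8 * 32, 256)).shape)
\end{verbatim}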
|
| 231 |
+
|
| 232 |
+
The weights of the attention map \( \mathbf{A}^l_i \) record the relevance of \( \mathbf{Q}^l_i \) to the visual features \( \mathbf{F} \), and \( \mathbf{Q}^l_i \) in turn represents the context of the string \( \mathbf{S}^l_i \). Therefore, only when the visual model incorporates the context from \( \mathbf{S}^l_i \) into the visual features of the target character \( c_i \) can the attention map \( \mathbf{A}^l_i \) maximize the relevance between \( \mathbf{Q}^l_i \) and the visual features of that character, thus accurately highlighting the corresponding position of character \( c_i \), as shown in Fig.~\ref{fig:smtr}. A similar process is applied to the right-side string \( \mathbf{S}^r_i \), where the corresponding attention map \( \mathbf{A}^r_i \) and visual feature \( \mathbf{F}^r_i \) contribute to the prediction \( \mathbf{\tilde{Y}}^r_i \). By leveraging this scheme during training, SGM effectively guides the visual model in integrating linguistic context into visual features. Consequently, even when SGM is discarded during inference, the linguistic context is still maintained, enhancing the accuracy of CTC models.
|
| 233 |
+
|
| 234 |
+
Note that although SGM is a decoder-based module, it is discarded during inference and SVTRv2 becomes a pure CTC model. In contrast, previous methods, such as VisionLAN~\cite{Wang_2021_visionlan} and LPV~\cite{ijcai2023LPV}, despite modeling linguistic context using visual features, still rely on attention-based decoders to activate linguistic information during inference, a process that is incompatible with CTC models.
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
|
| 240 |
+
|
| 241 |
+
|
| 242 |
+
\subsection{Optimization Objective}
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
During training, the optimization objective is to minimize the loss $\mathcal{L}$, which comprises $\mathcal{L}_{ctc}$ and $\mathcal{L}_{sgm}$ as listed below:
|
| 246 |
+
\begin{align}
|
| 247 |
+
\mathcal{L}_{ctc} &= CTCLoss(\mathbf{\tilde{Y}}_{ctc}, \mathbf{Y}) \\ \notag
|
| 248 |
+
\mathcal{L}_{sgm} &= \frac{1}{2L} \sum\nolimits_{i=1}^{L}(CE(\mathbf{\tilde{Y}}^l_i, c_i) + CE(\mathbf{\tilde{Y}}^r_i, c_i)) \\ \notag
|
| 249 |
+
\mathcal{L} &= \lambda_1 \mathcal{L}_{ctc} + \lambda_2 \mathcal{L}_{sgm}
|
| 250 |
+
\end{align}
|
| 251 |
+
\noindent where \(CE\) represents the cross-entropy loss, and \(\lambda_1\) and \(\lambda_2\) are weighting parameters set to 0.1 and 1, respectively.
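For concreteness, the sketch below computes the combined objective with random stand-in tensors; shapes, the blank index, and the label length are example assumptions only.
\begin{verbatim}
# Sketch of the training objective L = 0.1 * L_ctc + 1.0 * L_sgm.
import torch
import torch.nn as nn
import torch.nn.functional as F

T, N_c, L = 32, 95, 7                      # W/4, classes (blank=0), label len
log_probs = torch.randn(T, 1, N_c).log_softmax(-1)   # (T, batch, N_c)
targets = torch.randint(1, N_c, (1, L))
ctc = nn.CTCLoss(blank=0, zero_infinity=True)
l_ctc = ctc(log_probs, targets,
            input_lengths=torch.tensor([T]), target_lengths=torch.tensor([L]))

sgm_logits_l = torch.randn(L, N_c)         # per-character left-context logits
sgm_logits_r = torch.randn(L, N_c)         # per-character right-context logits
chars = targets[0]
l_sgm = 0.5 * (F.cross_entropy(sgm_logits_l, chars)
               + F.cross_entropy(sgm_logits_r, chars))

loss = 0.1 * l_ctc + 1.0 * l_sgm
print(loss.item())
\end{verbatim}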
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
\section{Experiments}
|
| 255 |
+
|
| 256 |
+
\subsection{Datasets and Implementation Details}
|
| 257 |
+
\label{sec:Implementation}
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
|
| 261 |
+
We evaluate SVTRv2 across multiple benchmarks covering diverse scenarios. They are: 1) six common regular and irregular benchmarks (\textit{Com}), including ICDAR 2013 (\textit{IC13})~\cite{icdar2013}, Street View Text (\textit{SVT})~\cite{Wang2011SVT}, IIIT5K-Words (\textit{IIIT5K})~\cite{IIIT5K}, ICDAR 2015 (\textit{IC15})~\cite{icdar2015}, Street View Text-Perspective (\textit{SVTP})~\cite{SVTP} and \textit{CUTE80}~\cite{Risnumawan2014cute}. For IC13 and IC15, we use the versions with 857 and 1811 images, respectively; 2) the recent Union14M-Benchmark (\textit{U14M})~\cite{jiang2023revisiting}, which includes seven challenging subsets: \textit{Curve}, \textit{Multi-Oriented (MO)}, \textit{Artistic}, \textit{Contextless}, \textit{Salient}, \textit{Multi-Words} and \textit{General}; 3) occluded scene text dataset (\textit{OST})~\cite{Wang_2021_visionlan}, which is categorized into two subsets based on the degree of occlusion: weak occlusion (\textit{OST}$_w$) and heavy occlusion (\textit{OST}$_h$); 4) long text benchmark (\textit{LTB})~\cite{du2024smtr}, which includes 3376 samples of text length from 25 to 35; 5) the test set of BCTR~\cite{chen2021benchmarking}, a Chinese text recognition benchmark with four subsets: \textit{Scene}, \textit{Web}, \textit{Document} (\textit{Doc}) and \textit{Hand-Writing} (\textit{HW}).
|
| 262 |
+
|
| 263 |
+
|
| 264 |
+
For English recognition, there are three large-scale real-world training sets, i.e., the \textit{Real} dataset~\cite{BautistaA22PARSeq}, \textit{REBU-Syn}~\cite{Rang_2024_CVPR_clip4str}, and \textit{Union14M-L}~\cite{jiang2023revisiting}. However, all of them overlap with \textit{U14M} (detailed in \textit{Suppl.~Sec.}~8) across the seven subsets, leading to data leakage that makes them unsuitable for training. To resolve this, we introduce a filtered version of \textit{Union14M-L}, termed \textit{U14M-Filter}, obtained by removing these overlapping instances. This new dataset is used to train SVTRv2 and 24 popular STR methods.
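As a purely hypothetical illustration of such a filtering step (the actual construction of \textit{U14M-Filter} may use a different overlap criterion, e.g., perceptual hashing or metadata matching), a training set could be de-duplicated against a benchmark as follows:
\begin{verbatim}
# Hypothetical sketch of removing benchmark-overlapping training samples.
import hashlib

def image_key(image_bytes: bytes) -> str:
    return hashlib.sha256(image_bytes).hexdigest()

def filter_training_set(train_samples, benchmark_samples):
    """Both arguments: iterables of (image_bytes, label)."""
    benchmark_keys = {image_key(img) for img, _ in benchmark_samples}
    return [(img, lbl) for img, lbl in train_samples
            if image_key(img) not in benchmark_keys]

# Toy byte strings stand in for images:
train = [(b"img-a", "HELLO"), (b"img-b", "WORLD")]
bench = [(b"img-b", "WORLD")]
print(len(filter_training_set(train, bench)))   # 1
\end{verbatim}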
|
| 265 |
+
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
For Chinese recognition, we train models on the training set of \textit{BCTR}~\cite{chen2021benchmarking}. Unlike previous methods that train separately on each subset, we train the model on their union and then evaluate it on the four subsets.
|
| 270 |
+
|
| 271 |
+
|
| 272 |
+
% For English model, we take SVTRv2 without SGM as the pre-trained and fine-tuned SVTRv2 with SGM with the same settings
|
| 273 |
+
|
| 274 |
+
We use the AdamW optimizer~\cite{adamw} with a weight decay of 0.05 for training. The learning rate is set to $6.5\times 10^{-4}$ and the batch size to 1024. A one-cycle LR scheduler~\cite{cosine} with 1.5/4.5 epochs of linear warm-up is used over the 20/100 training epochs, where a/b denotes a for English and b for Chinese. For English models, training is conducted in two phases: first without SGM and then with SGM, both using the above settings. Word accuracy is used as the evaluation metric. Data augmentations such as rotation, perspective distortion, motion blur, and Gaussian noise are applied randomly. The maximum text length is set to 25 during training. The size of the character set $N_c$ is set to 94 for English and 6624~\cite{ppocrv3} for Chinese. In the experiments below, SVTRv2 refers to SVTRv2-B unless specified otherwise. All models are trained on 4 RTX 4090 GPUs.
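A minimal PyTorch sketch of this optimizer/schedule setup (for the English configuration) is shown below. The stand-in model, the steps-per-epoch value, and the use of \texttt{OneCycleLR} as an approximation of the cited scheduler are assumptions; the actual training code may differ.
\begin{verbatim}
# Sketch of the optimizer and one-cycle schedule described above.
import torch

model = torch.nn.Linear(256, 95)              # stand-in for SVTRv2
optimizer = torch.optim.AdamW(model.parameters(), lr=6.5e-4,
                              weight_decay=0.05)

epochs, steps_per_epoch = 20, 1000            # steps/epoch depends on data
scheduler = torch.optim.lr_scheduler.OneCycleLR(
    optimizer, max_lr=6.5e-4,
    epochs=epochs, steps_per_epoch=steps_per_epoch,
    pct_start=1.5 / 20)                       # 1.5-epoch warm-up out of 20

for step in range(3):                         # training loop skeleton
    optimizer.zero_grad()
    loss = model(torch.randn(4, 256)).sum()
    loss.backward()
    optimizer.step()
    scheduler.step()
\end{verbatim}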
|
| 275 |
+
|
| 276 |
+
|
| 277 |
+
\subsection{Ablation Study}
|
| 278 |
+
|
| 279 |
+
|
| 280 |
+
\noindent\textbf{Effectiveness of MSR}. We group \textit{Curve} and \textit{MO} text in \textit{U14M} based on the aspect ratio $R_i$. As shown in Tab.~\ref{tab:msr_FRM}, the majority of irregular texts fall within $R_1$ and $R_2$, where they are particularly prone to distortion when resized to a fixed size (see \textit{Fixed}$_{32\times128}$ in Fig.~\ref{fig:case}). In contrast, MSR demonstrates significant improvements of 15.3\% on $R_1$ and 5.2\% on $R_2$ compared to \textit{Fixed}$_{32\times128}$. Meanwhile, a larger fixed size, \textit{Fixed}$_{64\times256}$, although improving the accuracy compared to the baseline, still performs worse than our MSR by clear margins. The results strongly confirm our hypothesis that undesired resizing hurts recognition. Our MSR effectively mitigates this issue, providing better visual features and thus enhancing recognition accuracy.
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
|
| 284 |
+
|
| 285 |
+
\noindent\textbf{Effectiveness of FRM}. We ablate the two rearrangement sub-modules (Horizontal (H) rearranging and Vertical (V) rearranging). As shown in Tab.~\ref{tab:msr_FRM}, compared to the variant without FRM (w/o FRM), they individually improve accuracy by 2.03\% and 0.71\% on \textit{MO}, and together they yield a 2.46\% gain. In addition, we evaluate a Transformer block (+ TF$_1$) that learns the matrix $\mathbf{M}$ holistically, as an alternative to splitting the process into two steps. However, its effectiveness is less pronounced, likely because it fails to effectively distinguish between vertical and horizontal orientations. In contrast, FRM performs feature rearrangement in both directions, making it highly sensitive to text irregularity and thus facilitating accurate CTC alignment. As shown in the left five cases in Fig.~\ref{fig:case}, FRM successfully recognizes reversed instances, providing strong evidence of FRM's effectiveness.
|
| 286 |
+
|
| 287 |
+
\begin{table}[t]\footnotesize
|
| 288 |
+
\centering
|
| 289 |
+
\setlength{\tabcolsep}{1pt}{
|
| 290 |
+
\begin{tabular}{c|c|cccc|cc|cc}
|
| 291 |
+
\toprule
|
| 292 |
+
|
| 293 |
+
\multicolumn{2}{c|}{} & \begin{tabular}[c]{@{}c@{}}$R_1$\\ 2,688\end{tabular} & \begin{tabular}[c]{@{}c@{}}$R_2$\\ 788\end{tabular} & \begin{tabular}[c]{@{}c@{}}$R_3$\\ 266\end{tabular} & \begin{tabular}[c]{@{}c@{}}$R_4$\\ 32\end{tabular} & \textit{Curve} & \textit{MO} & \textit{Com} & \textit{U14M} \\
|
| 294 |
+
|
| 295 |
+
\midrule
|
| 296 |
+
|
| 297 |
+
\multicolumn{2}{c|}{SVTRv2 (+MSR+FRM)} & 87.4 & 88.3 & 86.1 & 87.5 & 88.17 & 86.19 & 96.16 & 83.86 \\
|
| 298 |
+
\multicolumn{2}{c|}{SVTRv2 (w/o both)} & 70.5 & 81.5 & 82.8 & 84.4 & 82.89 & 65.59 & 95.28 & 77.78 \\
|
| 299 |
+
\midrule
|
| 300 |
+
\multirow{3}{*}{\begin{tabular}[c]{@{}c@{}}vs.\\ MSR \\ (+FRM)\end{tabular}} & Fixed$_{32\times 128}$ & 72.1 & 83.1 & 84.1 & 85.6 & 83.18 & 68.71 & 95.56 & 78.87 \\
|
| 301 |
+
& Padding$_{32\times W}$ & 52.1 & 71.3 & 82.3 & 87.4 & 71.06 & 51.57 & 94.70 & 71.82 \\
|
| 302 |
+
& Fixed$_{64\times 256}$ & 76.6 & 81.6 & 81.9 & 80.2 & 85.70 & 67.49 & 95.07 & 79.03 \\
|
| 303 |
+
\midrule
|
| 304 |
+
\multirow{4}{*}{\begin{tabular}[c]{@{}c@{}}vs.\\ FRM \\ (+MSR)\end{tabular}} & w/o FRM & 85.7 & 86.3 & 86.0 & 85.5 & 87.35 & 83.73 & 95.44 & 82.22 \\
|
| 305 |
+
& + H rearranging & 87.0 & 87.1 & 86.3 & 85.5 & 88.05 & 85.76 & 95.98 & 82.94 \\
|
| 306 |
+
& + V rearranging & 85.0 & 87.6 & 88.5 & 85.5 & 88.01 & 84.44 & 95.66 & 82.70 \\
|
| 307 |
+
& + TF$_1$ & 86.4 & 86.3 & 87.5 & 86.1 & 87.51 & 85.50 & 95.60 & 82.49 \\
|
| 308 |
+
\bottomrule
|
| 309 |
+
\toprule
|
| 310 |
+
\multirow{5}{*}{-} & ResNet+TF$_3$ & 49.3 & 63.5 & 64.0 & 66.7 & 65.00 & 42.07 & 92.26 & 63.00 \\
|
| 311 |
+
& FocalNet-B & 56.7 & 73.2 & 75.3 & 73.9 & 76.46 & 45.80 & 94.49 & 71.63 \\
|
| 312 |
+
& ConvNeXtV2 & 58.4 & 71.0 & 73.6 & 71.2 & 75.97 & 45.95 & 93.93 & 70.43 \\
|
| 313 |
+
& ViT-S & 68.5 & 73.8 & 73.8 & 73.0 & 75.02 & 64.35 & 93.57 & 72.09 \\
|
| 314 |
+
& SVTR-B & 53.3 & 74.8 & 76.4 & 78.4 & 76.22 & 44.49 & 94.58 & 71.17 \\
|
| 315 |
+
\midrule
|
| 316 |
+
\multirow{5}{*}{+FRM} & ResNet+TF$_3$ & 53.8 & 67.9 & 65.5 & 65.8 & 69.00 & 46.02 & 93.12 & 66.81 \\
|
| 317 |
+
& FocalNet-B & 57.1 & 75.2 & 77.1 & 78.4 & 75.52 & 51.21 & 94.39 & 72.73 \\
|
| 318 |
+
& ConvNeXtV2 & 60.7 & 79.0 & 79.0 & 81.1 & 79.72 & 53.32 & 94.19 & 73.09 \\
|
| 319 |
+
& ViT-S & 75.1 & 79.4 & 79.0 & 78.4 & 80.42 & 72.17 & 94.44 & 77.07 \\
|
| 320 |
+
& SVTR-B & 59.1 & 79.0 & 78.8 & 80.2 & 79.84 & 51.28 & 94.75 & 73.48 \\
|
| 321 |
+
\midrule
|
| 322 |
+
\multirow{3}{*}{+MSR} & ResNet+TF$_3$ & 68.2 & 71.3 & 75.3 & 72.1 & 75.64 & 60.33 & 93.50 & 71.95 \\
|
| 323 |
+
& FocalNet-B & 80.5 & 80.6 & 79.2 & 85.0 & 82.26 & 74.82 & 94.92 & 78.94 \\
|
| 324 |
+
& ConvNeXtV2 & 76.2 & 79.0 & 82.3 & 80.2 & 81.05 & 73.27 & 94.60 & 77.71 \\
|
| 325 |
+
\midrule
|
| 326 |
+
\multicolumn{10}{c}{\setlength{\tabcolsep}{1pt}{
|
| 327 |
+
\begin{tabular}{c|ccc|ccc|cc}
|
| 328 |
+
- / + SGM & \textit{OST}$_w$ & \textit{OST}$_h$ & Avg & \textit{OST}$_w^*$ & \textit{OST}$_h^*$ & Avg & \textit{Com}$^*$ & \textit{U14M}$^*$ \\
|
| 329 |
+
\midrule
|
| 330 |
+
ResNet+TF$_3$ & 71.6 & 51.8 & 61.72 & 77.9 & 55.0 & 66.43 & 95.19 & 78.61 \\
|
| 331 |
+
FocalNet-B & 78.9 & 62.8 & 70.88 & 84.6 & 70.6 & 77.61 & 96.28 & 84.10 \\
|
| 332 |
+
ConvNeXtV2 & 76.0 & 58.2 & 67.10 & 82.0 & 63.9 & 72.97 & 96.09 & 82.10 \\
|
| 333 |
+
\end{tabular}}} \\
|
| 334 |
+
\bottomrule
|
| 335 |
+
\end{tabular}}
|
| 336 |
+
\caption{Ablations on MSR and FRM (top) and assessment of MSR, FRM, and SGM across visual models (bottom). * means with SGM.}
|
| 337 |
+
\label{tab:msr_FRM}
|
| 338 |
+
\end{table}
|
| 339 |
+
|
| 340 |
+
|
| 341 |
+
\begin{table}[t]\footnotesize
|
| 342 |
+
\centering
|
| 343 |
+
\setlength{\tabcolsep}{3pt}{
|
| 344 |
+
\begin{tabular}{c|c|ccc|cc}
|
| 345 |
+
\toprule
|
| 346 |
+
& Method & \textit{OST}$_w$ & \textit{OST}$_h$ & Avg & \textit{Com} & \textit{U14M} \\
|
| 347 |
+
\midrule
|
| 348 |
+
\multirow{7}{*}{\begin{tabular}[c]{@{}c@{}}Linguistic \\context \\ modeling\end{tabular}} & w/o SGM & 82.86 & 66.97 & 74.92 & 96.16 & 83.86 \\
|
| 349 |
+
& SGM & \textbf{86.26} & \textbf{73.80} & \textbf{80.03} & \textbf{96.57} & \textbf{86.14} \\
|
| 350 |
+
& GTC~\cite{hu2020gtc} & 83.07 & 68.32 & 75.70 & 96.01 & 84.33 \\
|
| 351 |
+
& ABINet~\cite{fang2021abinet} & 83.07 & 67.54 & 75.31 & 96.25 & 84.17 \\
|
| 352 |
+
& VisionLAN~\cite{Wang_2021_visionlan} & 83.25 & 68.97 & 76.11 & 96.39 & 84.01 \\
|
| 353 |
+
& PARSeq~\cite{BautistaA22PARSeq} & 83.85 & 69.24 & 76.55 & 96.21 & 84.72 \\
|
| 354 |
+
& MAERec~\cite{jiang2023revisiting} & 83.21 & 69.69 & 76.45 & 96.47 & 84.69 \\
|
| 355 |
+
\bottomrule
|
| 356 |
+
\end{tabular}}
|
| 357 |
+
\caption{Comparison of the proposed SGM with other language models in linguistic context modeling on \textit{OST}. }
|
| 358 |
+
\label{tab:semantic}
|
| 359 |
+
\end{table}
|
| 360 |
+
|
| 361 |
+
\begin{figure*}[t]
|
| 362 |
+
\centering
|
| 363 |
+
\includegraphics[width=0.98\textwidth]{SVTRv2_case.pdf}
|
| 364 |
+
\caption{Qualitative comparison of SVTRv2 with previous methods on irregular and occluded text. $^\dagger$ means that SVTRv2 uses fixed-size resizing (in the \textit{Fixed}$_{32\times128}$ part) or the rectification module (in the \textit{TPS} part) as the resize strategy. \textit{MAERec*} means that SVTRv2$^\dagger$ is integrated with the attention-based decoder from the previous best model, i.e., MAERec~\cite{jiang2023revisiting}; such a decoder is widely employed in~\cite{Sheng2019nrtr,pr2021MASTER,cvpr2021TransOCR,xie2022toward_cornertrans,yuICCV2023clipctr,yang2024class_cam,Xu_2024_CVPR_OTE}. \textcolor{green}{Green}, \textcolor{red}{red}, and \textcolor{red}{\_} denote correct, wrong, and missed recognition, respectively.}
|
| 365 |
+
\label{fig:case}
|
| 366 |
+
\end{figure*}
|
| 367 |
+
|
| 368 |
+
\begin{table*}[t]\footnotesize
|
| 369 |
+
\centering
|
| 370 |
+
\setlength{\tabcolsep}{1.8pt}{
|
| 371 |
+
\begin{tabular}{cc|c|c|ccccccc|cccccccc|c|c|c|c}
|
| 372 |
+
\multicolumn{23}{c}{\setlength{\tabcolsep}{3.5pt}{\begin{tabular}{
|
| 373 |
+
>{\columncolor[HTML]{FFFFC7}}c
|
| 374 |
+
>{\columncolor[HTML]{FFFFC7}}c
|
| 375 |
+
>{\columncolor[HTML]{FFFFC7}}c
|
| 376 |
+
>{\columncolor[HTML]{FFFFC7}}c
|
| 377 |
+
>{\columncolor[HTML]{FFFFC7}}c
|
| 378 |
+
>{\columncolor[HTML]{FFFFC7}}c c
|
| 379 |
+
>{\columncolor[HTML]{ECF4FF}}c
|
| 380 |
+
>{\columncolor[HTML]{ECF4FF}}c
|
| 381 |
+
>{\columncolor[HTML]{ECF4FF}}c
|
| 382 |
+
>{\columncolor[HTML]{ECF4FF}}c
|
| 383 |
+
>{\columncolor[HTML]{ECF4FF}}c
|
| 384 |
+
>{\columncolor[HTML]{ECF4FF}}c
|
| 385 |
+
>{\columncolor[HTML]{ECF4FF}}c }
|
| 386 |
+
\toprule
|
| 387 |
+
\textit{IIIT5k} & \textit{SVT} & \textit{ICDAR2013} & \textit{ICDAR2015} & \textit{SVTP} & \textit{CUTE80} & $\|$ & \textit{Curve} & \textit{Multi-Oriented} & \textit{Artistic} & \textit{Contextless} & \textit{Salient} & \textit{Multi-Words} & \textit{General}
|
| 388 |
+
\end{tabular}}} \\
|
| 389 |
+
\toprule
|
| 390 |
+
\multicolumn{2}{c|}{Method} & Venue & Encoder & \multicolumn{6}{c}{\cellcolor[HTML]{FFFFC7}Common Benchmarks (\textit{Com})} & Avg & \multicolumn{7}{c}{\cellcolor[HTML]{ECF4FF}Union14M-Benchmark (\textit{U14M})} & Avg & \textit{LTB} & \textit{OST} & \textit{Size} & \textit{FPS} \\
|
| 391 |
+
\midrule
|
| 392 |
+
\multicolumn{2}{r|}{ASTER~\cite{shi2019aster}} & TPAMI19 & ResNet+LSTM & 96.1 & 93.0 & 94.9 & 86.1 & 87.9 & 92.0 & 91.70 & 70.9 & 82.2 & 56.7 & 62.9 & 73.9 & 58.5 & 76.3 & 68.75 & 0.1 & 61.9 &19.0 & 67.1 \\
|
| 393 |
+
\multicolumn{2}{r|}{NRTR~\cite{Sheng2019nrtr}} & ICDAR19 & Stem+TF$_6$ & 98.1 & 96.8 & 97.8 & 88.9 & 93.3 & 94.4 & 94.89 & 67.9 & 42.4 & 66.5 & 73.6 & 66.4 & 77.2 & 78.3 & 67.46 & 0.0 & 74.8 &44.3 & 17.3 \\
|
| 394 |
+
\multicolumn{2}{r|}{MORAN~\cite{pr2019MORAN}} & PR19 & ResNet+LSTM & 96.7 & 91.7 & 94.6 & 84.6 & 85.7 & 90.3 & 90.61 & 51.2 & 15.5 & 51.3 & 61.2 & 43.2 & 64.1 & 69.3 & 50.82 & 0.1 & 57.9 &17.4 & 59.5 \\
|
| 395 |
+
\multicolumn{2}{r|}{SAR~\cite{li2019sar}} & AAAI19 & ResNet+LSTM & 98.1 & 93.8 & 96.7 & 86.0 & 87.9 & 95.5 & 93.01 & 70.5 & 51.8 & 63.7 & 73.9 & 64.0 & 79.1 & 75.5 & 68.36 & 0.0 & 60.6 &57.5 & 15.8 \\
|
| 396 |
+
\multicolumn{2}{r|}{DAN~\cite{wang2020aaai_dan}} & AAAI20 & ResNet+FPN & 97.5 & 94.7 & 96.5 & 87.1 & 89.1 & 94.4 & 93.24 & 74.9 & 63.3 & 63.4 & 70.6 & 70.2 & 71.1 & 76.8 & 70.05 & 0.0 & 61.8 &27.7 & 99.0 \\
|
| 397 |
+
\multicolumn{2}{r|}{SRN~\cite{yu2020srn}} & CVPR20 & ResNet+FPN & 97.2 & 96.3 & 97.5 & 87.9 & 90.9 & 96.9 & 94.45 & 78.1 & 63.2 & 66.3 & 65.3 & 71.4 & 58.3 & 76.5 & 68.43 & 0.0 & 64.6 &51.7 & 67.1 \\
|
| 398 |
+
\multicolumn{2}{r|}{SEED~\cite{cvpr2020seed}} & CVPR20 & ResNet+LSTM & 96.5 & 93.2 & 94.2 & 87.5 & 88.7 & 93.4 & 92.24 & 69.1 & 80.9 & 56.9 & 63.9 & 73.4 & 61.3 & 76.5 & 68.87 & 0.1 & 62.6 &24.0 & 65.4\\
|
| 399 |
+
\multicolumn{2}{r|}{AutoSTR~\cite{zhang2020autostr}} & ECCV20 & NAS+LSTM & 96.8 & 92.4 & 95.7 & 86.6 & 88.2 & 93.4 & 92.19 & 72.1 & 81.7 & 56.7 & 64.8 & 75.4 & 64.0 & 75.9 & 70.09 & 0.1 & 61.5 &6.0 & 82.6\\
|
| 400 |
+
\multicolumn{2}{r|}{RoScanner~\cite{yue2020robustscanner}} & ECCV20 & ResNet & 98.5 & 95.8 & 97.7 & 88.2 & 90.1 & 97.6 & 94.65 & 79.4 & 68.1 & 70.5 & 79.6 & 71.6 & 82.5 & 80.8 & 76.08 & 0.0 & 68.6 &48.0 & 64.1 \\
|
| 401 |
+
\multicolumn{2}{r|}{ABINet~\cite{fang2021abinet}} & CVPR21 & ResNet+TF$_3$ & 98.5 & 98.1 & 97.7 & 90.1 & 94.1 & 96.5 & 95.83 & 80.4 & 69.0 & 71.7 & 74.7 & 77.6 & 76.8 & 79.8 & 75.72 & 0.0 & 75.0 &36.9 & 73.0 \\
|
| 402 |
+
\multicolumn{2}{r|}{VisionLAN~\cite{Wang_2021_visionlan}} & ICCV21 & ResNet+TF$_3$ & 98.2 & 95.8 & 97.1 & 88.6 & 91.2 & 96.2 & 94.50 & 79.6 & 71.4 & 67.9 & 73.7 & 76.1 & 73.9 & 79.1 & 74.53 & 0.0 & 66.4 &32.9 & 93.5 \\
|
| 403 |
+
\multicolumn{2}{r|}{PARSeq~\cite{BautistaA22PARSeq}} & ECCV22 & ViT-S & 98.9 & 98.1 & 98.4 & 90.1 & 94.3 & 98.6 & 96.40 & 87.6 & 88.8 & 76.5 & 83.4 & 84.4 & 84.3 & 84.9 & 84.26 & 0.0 & 79.9 &23.8 & 52.6 \\
|
| 404 |
+
\multicolumn{2}{r|}{MATRN~\cite{MATRN}} & ECCV22 & ResNet+TF$_3$ & 98.8 & 98.3 & 97.9 & 90.3 & 95.2 & 97.2 & 96.29 & 82.2 & 73.0 & 73.4 & 76.9 & 79.4 & 77.4 & 81.0 & 77.62 & 0.0 & 77.8 &44.3 & 46.9\\
|
| 405 |
+
\multicolumn{2}{r|}{MGP-STR~\cite{mgpstr}} & ECCV22 & ViT-B & 97.9 & 97.8 & 97.1 &89.6 &95.2 &96.9 &95.75 &85.2 &83.7 &72.6 &75.1 &79.8 &71.1 &83.1 &78.65 & 0.0 & 78.7 & 148 & 120\\
|
| 406 |
+
|
| 407 |
+
\multicolumn{2}{r|}{LPV~\cite{ijcai2023LPV}} & IJCAI23 & SVTR-B & 98.6 & 97.8 & 98.1 & 89.8 & 93.6 & 97.6 & 95.93 & 86.2 & 78.7 & 75.8 & 80.2 & 82.9 & 81.6 & 82.9 & 81.20 & 0.0 & 77.7 & 30.5 & 82.6\\
|
| 408 |
+
|
| 409 |
+
\multicolumn{2}{r|}{MAERec~\cite{jiang2023revisiting}} & ICCV23 & ViT-S & \textbf{99.2} & 97.8 & 98.2 & 90.4 & 94.3 & 98.3 & 96.36 & 89.1 & 87.1 & 79.0 & 84.2 & \textbf{86.3} & 85.9 & 84.6 & 85.17 & 9.8 & 76.4 & 35.7 & 17.1 \\
|
| 410 |
+
|
| 411 |
+
\multicolumn{2}{r|}{LISTER~\cite{iccv2023lister}} & ICCV23 & FocalNet-B & 98.8 & 97.5 & 98.6 & 90.0 & 94.4 & 96.9 & 96.03 & 78.7 & 68.8 & 73.7 & 81.6 & 74.8 & 82.4 & 83.5 & 77.64 & 36.3 & 77.1 &51.1 & 44.6 \\
|
| 412 |
+
\multicolumn{2}{r|}{CDistNet~\cite{zheng2024cdistnet}} & IJCV24 & ResNet+TF$_3$ & 98.7 & 97.1 & 97.8 & 89.6 & 93.5 & 96.9 & 95.59 & 81.7 & 77.1 & 72.6 & 78.2 & 79.9 & 79.7 & 81.1 & 78.62 & 0.0 & 71.8 &43.3 & 15.9\\
|
| 413 |
+
\multicolumn{2}{r|}{CAM~\cite{yang2024class_cam}} & PR24 & ConvNeXtV2 & 98.2 & 96.1 & 96.6 & 89.0 & 93.5 & 96.2 & 94.94 & 85.4 & 89.0 & 72.0 & 75.4 & 84.0 & 74.8 & 83.1 & 80.52 & 0.7 & 74.2 & 58.7 & 28.6\\
|
| 414 |
+
\multicolumn{2}{r|}{BUSNet~\cite{Wei_2024_busnet}} & AAAI24 & ViT-S & 98.3 & 98.1 & 97.8 & 90.2 & \textbf{95.3} & 96.5 & 96.06 & 83.0 & 82.3 & 70.8 & 77.9 & 78.8 & 71.2 & 82.6 & 78.10 & 0.0 & 78.7 &32.1 & 83.3\\
|
| 415 |
+
\multicolumn{2}{r|}{OTE~\cite{Xu_2024_CVPR_OTE}} & CVPR24 & SVTR-B & 98.6 & 96.6 & 98.0 & 90.1 & 94.0 & 97.2 & 95.74 & 86.0 & 75.8 & 74.6 & 74.7 & 81.0 & 65.3 & 82.3 & 77.09 & 0.0 & 77.8 &20.3 & 55.2\\
|
| 416 |
+
\multicolumn{2}{r|}{CPPD~\cite{du2023cppd}} & TPAMI25 & SVTR-B & 99.0 & 97.8 & 98.2 & 90.4 & 94.0 & \textbf{99.0} & 96.40 & 86.2 & 78.7 & 76.5 & 82.9 & 83.5 & 81.9 & 83.5 & 81.91 & 0.0 & 79.6 &27.0 & 125 \\
|
| 417 |
+
\multicolumn{2}{r|}{IGTR-AR~\cite{du2024igtr}} & TPAMI25 & SVTR-B & 98.7 & \textbf{98.4} & 98.1 & 90.5 & 94.9 & 98.3 & 96.48 & 90.4 & \textbf{91.2} & 77.0 & 82.4 & 84.7 & 84.0 & 84.4 & 84.86 & 0.0 & 76.3 & 24.1 & 58.3 \\
|
| 418 |
+
\multicolumn{2}{r|}{SMTR~\cite{du2024smtr}} & AAAI25 & FocalSVTR & 99.0 & 97.4 & 98.3 & 90.1 & 92.7 & 97.9 & 95.90 & 89.1 & 87.7 & 76.8 & 83.9 & 84.6 & \textbf{89.3} & 83.7 & 85.00 & \textbf{55.5} & 73.5 & 15.8 & 66.2 \\
|
| 419 |
+
\midrule
|
| 420 |
+
\multicolumn{1}{c|}{}& CRNN~\cite{shi2017crnn} & TPAMI16 & ResNet+LSTM & 95.8 & 91.8 & 94.6 & 84.9 & 83.1 & 91.0 & 90.21 & 48.1 & 13.0 & 51.2 & 62.3 & 41.4 & 60.4 & 68.2 & 49.24 & 47.2 &58.0 & 16.2 & 172\\
|
| 421 |
+
\multicolumn{1}{c|}{}& SVTR~\cite{duijcai2022svtr} & IJCAI22 & SVTR-B & 98.0 & 97.1 & 97.3 & 88.6 & 90.7 & 95.8 & 94.58 & 76.2 & 44.5 & 67.8 & 78.7 & 75.2 & 77.9 & 77.8 & 71.17 & 45.1 &69.6 & 18.1 & 161\\
|
| 422 |
+
\multicolumn{1}{c|}{} & \cellcolor[HTML]{EFEFEF} & \multicolumn{1}{c|}{\cellcolor[HTML]{EFEFEF}} & \cellcolor[HTML]{EFEFEF}SVTRv2-T & \cellcolor[HTML]{EFEFEF}98.6 & \cellcolor[HTML]{EFEFEF}96.6 & \cellcolor[HTML]{EFEFEF}98.0 & \cellcolor[HTML]{EFEFEF}88.4 & \cellcolor[HTML]{EFEFEF}90.5 & \cellcolor[HTML]{EFEFEF}96.5 & \cellcolor[HTML]{EFEFEF}94.78 & \cellcolor[HTML]{EFEFEF}83.6 & \cellcolor[HTML]{EFEFEF}76.0 & \cellcolor[HTML]{EFEFEF}71.2 & \cellcolor[HTML]{EFEFEF}82.4 & \cellcolor[HTML]{EFEFEF}77.2 & \cellcolor[HTML]{EFEFEF}82.3 & \cellcolor[HTML]{EFEFEF}80.7 & \cellcolor[HTML]{EFEFEF}79.05 & \cellcolor[HTML]{EFEFEF}47.8 & \cellcolor[HTML]{EFEFEF}71.4 & \cellcolor[HTML]{EFEFEF}5.1 & \cellcolor[HTML]{EFEFEF}201\\
|
| 423 |
+
\multicolumn{1}{c|}{} & \cellcolor[HTML]{EFEFEF} & \multicolumn{1}{c|}{\cellcolor[HTML]{EFEFEF}} & \cellcolor[HTML]{EFEFEF}SVTRv2-S & \cellcolor[HTML]{EFEFEF}99.0 & \cellcolor[HTML]{EFEFEF}98.3 & \cellcolor[HTML]{EFEFEF}98.5 & \cellcolor[HTML]{EFEFEF}89.5 & \cellcolor[HTML]{EFEFEF}92.9 & \cellcolor[HTML]{EFEFEF}98.6 & \cellcolor[HTML]{EFEFEF}96.13 & \cellcolor[HTML]{EFEFEF}88.3 & \cellcolor[HTML]{EFEFEF}84.6 & \cellcolor[HTML]{EFEFEF}76.5 & \cellcolor[HTML]{EFEFEF}84.3 & \cellcolor[HTML]{EFEFEF}83.3 & \cellcolor[HTML]{EFEFEF}85.4 & \cellcolor[HTML]{EFEFEF}83.5 & \cellcolor[HTML]{EFEFEF}83.70 & \cellcolor[HTML]{EFEFEF}47.6 & \cellcolor[HTML]{EFEFEF}78.0 & \cellcolor[HTML]{EFEFEF}11.3 & \cellcolor[HTML]{EFEFEF}189\\
|
| 424 |
+
|
| 425 |
+
\multicolumn{1}{c|}{\multirow{-5}{*}{\begin{tabular}[c]{@{}c@{}}C\\ T\\ C\end{tabular}}} & \multirow{-3}{*}{\cellcolor[HTML]{EFEFEF}SVTRv2} & \multicolumn{1}{c|}{\multirow{-3}{*}{\cellcolor[HTML]{EFEFEF}-}} & \cellcolor[HTML]{EFEFEF}SVTRv2-B & \cellcolor[HTML]{EFEFEF}\textbf{99.2} & \cellcolor[HTML]{EFEFEF}98.0 & \cellcolor[HTML]{EFEFEF}\textbf{98.7} & \cellcolor[HTML]{EFEFEF}\textbf{91.1} & \cellcolor[HTML]{EFEFEF}93.5 & \cellcolor[HTML]{EFEFEF}\textbf{99.0} & \cellcolor[HTML]{EFEFEF}\textbf{96.57} & \cellcolor[HTML]{EFEFEF}\textbf{90.6} & \cellcolor[HTML]{EFEFEF}89.0 & \cellcolor[HTML]{EFEFEF}\textbf{79.3} & \cellcolor[HTML]{EFEFEF}\textbf{86.1} & \cellcolor[HTML]{EFEFEF}86.2 & \cellcolor[HTML]{EFEFEF}86.7 & \cellcolor[HTML]{EFEFEF}\textbf{85.1} & \cellcolor[HTML]{EFEFEF}\textbf{86.14} & \cellcolor[HTML]{EFEFEF}50.2 & \cellcolor[HTML]{EFEFEF}\textbf{80.0} & \cellcolor[HTML]{EFEFEF}19.8 & \cellcolor[HTML]{EFEFEF}143
|
| 426 |
+
\\
|
| 427 |
+
\bottomrule
|
| 428 |
+
\end{tabular}}
|
| 429 |
+
\caption{All the models and SVTRv2 are trained on \textit{U14M-Filter}. To ensure that the results reflect the true potential of these methods under their best experimental settings, we conducted extensive tuning (detailed in \textit{Suppl.~Sec.}~12) of the model-specific settings (e.g., optimizer, learning rate, and regularization) and report the best results obtained. TF$_n$ denotes an $n$-layer Transformer block~\cite{NIPS2017_attn}. \textit{Size} denotes the number of parameters of the model ($\times 10^6$). \textit{FPS} is measured on one NVIDIA 1080Ti GPU.}
|
| 430 |
+
\label{tab:sota}
|
| 431 |
+
\end{table*}
|
| 432 |
+
|
| 433 |
+
|
| 434 |
+
|
| 435 |
+
\noindent\textbf{Effectiveness of SGM}. As illustrated in Tab.~\ref{tab:semantic}, SGM achieves increases of 0.41\% and 2.28\% on \textit{Com} and \textit{U14M}, respectively, and a 5.11\% improvement on \textit{OST}. Since \textit{OST} images frequently miss a portion of characters, this notable gain implies that the linguistic context has been successfully established. For comparison, we also employ GTC~\cite{hu2020gtc} and four popular language decoders~\cite{fang2021abinet,Wang_2021_visionlan,BautistaA22PARSeq,jiang2023revisiting} as substitutes for our SGM. However, for these alternatives the gains on \textit{OST} are not much larger than those on the other two datasets (\textit{Com} and \textit{U14M}). This suggests that SGM offers a distinct advantage in integrating linguistic context into visual features, significantly improving the recognition accuracy of CTC models. The five cases on the right side of Fig.~\ref{fig:case} show that SGM enables SVTRv2 to accurately decipher occluded characters, achieving results comparable to PARSeq~\cite{BautistaA22PARSeq}, which is equipped with an advanced permuted language model.
|
| 436 |
+
|
| 437 |
+
|
| 438 |
+
|
| 439 |
+
|
| 440 |
+
\noindent\textbf{Adaptability to different visual models.} We further examine MSR, FRM, and SGM on five frequently used visual models~\cite{he2016resnet,dosovitskiy2020vit,duijcai2022svtr,YangLDG22focalnet,WooDHC0KX23_ConvNeXtv2}. As presented in the bottom part of Tab.~\ref{tab:msr_FRM}, these modules consistently enhance performance (ViT~\cite{dosovitskiy2020vit} and SVTR~\cite{duijcai2022svtr} employ absolute positional encoding and are not compatible with MSR). When both FRM and MSR are incorporated, ResNet+TF$_3$~\cite{he2016resnet}, FocalNet~\cite{YangLDG22focalnet}, and ConvNeXtV2~\cite{WooDHC0KX23_ConvNeXtv2} exhibit significant accuracy improvements, either matching or even exceeding the accuracy of their EDTR counterparts (see Tab.~\ref{tab:sota}). The results highlight the versatility of the three proposed modules.
|
| 441 |
+
|
| 442 |
+
|
| 443 |
+
|
| 444 |
+
\subsection{Comparison with State-of-the-arts}
|
| 445 |
+
We compare SVTRv2 with 24 popular STR methods on \textit{Com}, \textit{U14M}, \textit{OST}, and \textit{LTB}. The results are presented in Tab.~\ref{tab:sota}. SVTRv2-B achieves top results in 9 out of the 15 evaluated scenarios and outperforms most EDTRs, showing a clear accuracy advantage. Meanwhile, it enjoys a small model size and a significant speed advantage. Specifically, compared to MAERec, the best-performing existing model on \textit{U14M}, SVTRv2-B shows an accuracy improvement of 0.97\% and 8$\times$ faster inference. Compared to CPPD, which is known for its excellent accuracy-speed tradeoff, SVTRv2-B runs more than 10\% faster, along with a 4.23\% accuracy increase on \textit{U14M}. Regarding \textit{OST}, as illustrated in the right part of Fig.~\ref{fig:case}, SVTRv2-B relies solely on a single visual model yet achieves accuracy comparable to PARSeq, which employs an advanced permuted language model and is the best-performing existing model on \textit{OST}. In addition, SVTRv2-T and SVTRv2-S, the two smaller variants, also show leading accuracy compared with models of similar size, offering flexible solutions with different accuracy-speed tradeoffs.
|
| 446 |
+
|
| 447 |
+
|
| 448 |
+
Two observations can be drawn from the results on \textit{Curve} and \textit{MO}. First, SVTRv2 models significantly surpass existing CTC models. For example, compared to SVTR-B, SVTRv2-B gains prominent accuracy improvements of 14.4\% and 44.5\% on the two subsets, respectively. Second, as shown in Tab.~\ref{tab:tps_decoder},
|
| 449 |
+
compared to previous methods that employ rectification modules~\cite{shi2019aster,cvpr2020seed,zhang2020autostr,zheng2024cdistnet,yang2024class_cam,duijcai2022svtr,zheng2023tps++} or attention-based decoders~\cite{Sheng2019nrtr,cvpr2021TransOCR,xie2022toward_cornertrans,yuICCV2023clipctr,jiang2023revisiting,yang2024class_cam,Xu_2024_CVPR_OTE,wang2020aaai_dan,li2019sar} to recognize irregular text, SVTRv2 also performs better on \textit{Curve}. In Fig.~\ref{fig:case}, \textit{TPS} (a rectification module) and \textit{MAERec*} (an attention-based decoder) fail to correctly recognize the extremely curved and rotated text, whereas SVTRv2 succeeds. Moreover, as demonstrated by the results on \textit{LTB} in Tab.~\ref{tab:tps_decoder} and Fig.~\ref{fig:case_long}, neither \textit{TPS} nor \textit{MAERec*} effectively recognizes long text, while SVTRv2 circumvents this limitation. These results indicate that our proposed modules successfully address the challenge of handling irregular text encountered by existing CTC models, while still preserving CTC's proficiency in recognizing long text.
|
| 450 |
+
|
| 451 |
+
|
| 452 |
+
|
| 453 |
+
SVTRv2 also exhibits strong performance in Chinese text recognition (see Tab.~\ref{tab:ch_all}), where SVTRv2-B achieves state-of-the-art accuracy. This result implies its great adaptability to different languages. Moreover, it also shows superior performance on Chinese long text (\textit{Scene}$_{L>25}$). In summary, we evaluate SVTRv2 across a wide range of scenarios, and the results consistently confirm that this CTC model beats leading EDTRs.
|
| 454 |
+
|
| 455 |
+
|
| 456 |
+
|
| 457 |
+
|
| 458 |
+
\begin{table}[t]\footnotesize
|
| 459 |
+
\centering
|
| 460 |
+
\setlength{\tabcolsep}{1.5pt}{
|
| 461 |
+
\begin{tabular}{c|c|cccc|cc|cc|c}
|
| 462 |
+
\toprule
|
| 463 |
+
\multicolumn{2}{c|}{} & $R_1$ & $R_2$ & $R_3$ & $R_4$ & \textit{Curve} & \textit{MO} & \textit{Com} & \textit{U14M} & \textit{LTB} \\
|
| 464 |
+
\midrule
|
| 465 |
+
\multicolumn{2}{c|}{SVTRv2} & \textbf{90.8} & \textbf{89.0} & \textbf{90.4} & \textbf{91.0} & \textbf{90.64} & \textbf{89.04} & \textbf{96.57} & \textbf{86.14} & \textbf{50.2} \\
|
| 466 |
+
\midrule
|
| 467 |
+
\multirow{2}{*}{TPS} & SVTR~\cite{duijcai2022svtr} & 86.8 & 82.3 & 77.3 & 75.7 & 82.19 & 86.12 & 94.62 & 78.44 & 0.0\\
|
| 468 |
+
& SVTRv2 & 89.5 & 85.1 & 78.4 & 83.8 & 84.71 & 88.97 & 94.62 & 79.94 & 0.5\\
|
| 469 |
+
\midrule
|
| 470 |
+
\multirow{2}{*}{\begin{tabular}[c]{@{}c@{}}MAE-\\REC*\end{tabular}} & SVTR~\cite{duijcai2022svtr} & 81.3 & 87.6 & 87.6 & 88.3 & 87.88 & 78.74 & 96.32 & 83.23 & 0.0\\
|
| 471 |
+
& SVTRv2 & 88.0 & 88.9 & 89.4 & 88.3 & 89.96 & 87.56 & 96.42 & 85.67 & 0.2\\
|
| 472 |
+
\bottomrule
|
| 473 |
+
\end{tabular}}
|
| 474 |
+
\caption{SVTRv2 and SVTR comparisons on irregular text and \textit{LTB}, where the rectification module (TPS) and the attention-based decoder (MAERec*) are employed.}
|
| 475 |
+
\label{tab:tps_decoder}
|
| 476 |
+
\end{table}
|
| 477 |
+
|
| 478 |
+
|
| 479 |
+
|
| 480 |
+
|
| 481 |
+
\begin{figure}[t]
|
| 482 |
+
\centering
|
| 483 |
+
\includegraphics[width=0.47\textwidth]{SVTRv2case_long.pdf}
|
| 484 |
+
\caption{Long text recognition examples. \textit{TPS} and \textit{MAERec*} denote SVTRv2 integrated with TPS and the decoder of MAERec.}
|
| 485 |
+
\label{fig:case_long}
|
| 486 |
+
\end{figure}
|
| 487 |
+
|
| 488 |
+
|
| 489 |
+
\begin{table}[t]\footnotesize
|
| 490 |
+
\centering
|
| 491 |
+
\setlength{\tabcolsep}{3pt}{
|
| 492 |
+
\begin{tabular}{r|ccccc|c|c}
|
| 493 |
+
\toprule
|
| 494 |
+
Method & \textit{Scene} & \textit{Web} & \textit{Doc} & \textit{HW} & Avg & \textit{Scene}$_{L>25}$ & \textit{Size} \\
|
| 495 |
+
\midrule
|
| 496 |
+
% CRNN~\cite{shi2017crnn} & 53.4 & 57.0 & 96.6 & 50.8 & 64.45 & 12.4 \\
|
| 497 |
+
ASTER~\cite{shi2019aster} & 61.3 & 51.7 & 96.2 & 37.0 & 61.55 & - & 27.2 \\
|
| 498 |
+
MORAN~\cite{pr2019MORAN} & 54.6 & 31.5 & 86.1 & 16.2 & 47.10 & - & 28.5 \\
|
| 499 |
+
SAR~\cite{li2019sar} & 59.7 & 58.0 & 95.7 & 36.5 & 62.48 & - & 27.8 \\
|
| 500 |
+
SEED~\cite{cvpr2020seed} & 44.7 & 28.1 & 91.4 & 21.0 & 46.30 & - & 36.1 \\
|
| 501 |
+
MASTER~\cite{pr2021MASTER} & 62.8 & 52.1 & 84.4 & 26.9 & 56.55 & - & 62.8 \\
|
| 502 |
+
ABINet~\cite{fang2021abinet} & 66.6 & 63.2 & 98.2 & 53.1 & 70.28 & - & 53.1 \\
|
| 503 |
+
TransOCR~\cite{cvpr2021TransOCR} & 71.3 & 64.8 & 97.1 & 53.0 & 71.55 & - & 83.9 \\
|
| 504 |
+
CCR-CLIP~\cite{yuICCV2023clipctr} & 71.3 & 69.2 & 98.3 & 60.3 & 74.78 & - & 62.0 \\
|
| 505 |
+
DCTC~\cite{Zhang_Lu_Liao_Huang_Li_Wang_Peng_2024_DCTC} & 73.9 & 68.5 & 99.4 & 51.0 & 73.20 & - & 40.8 \\
|
| 506 |
+
CAM~\cite{yang2024class_cam} & 76.0 & 69.3 & 98.1 & 59.2 & 76.80 & - & 135 \\
|
| 507 |
+
PARSeq*~\cite{BautistaA22PARSeq} & 84.2 & 82.8 & \textbf{99.5} & 63.0 & 82.37 &0.0 &28.9
|
| 508 |
+
\\
|
| 509 |
+
|
| 510 |
+
MAERec*~\cite{jiang2023revisiting} & \textbf{84.4} & 83.0 & \textbf{99.5} & 65.6 & 83.13 & 4.1 & 40.8\\
|
| 511 |
+
LISTER*~\cite{iccv2023lister} & 79.4 & 79.5 & 99.2 & 58.0 & 79.02 & 13.9 & 55.0 \\
|
| 512 |
+
DPTR*~\cite{zhao_2024_acmmm_dptr} & 80.0 & 79.6 & 98.9 & 64.4 & 80.73 & 0.0 &68.0 \\
|
| 513 |
+
CPPD*~\cite{du2023cppd} & 82.7 & 82.4 & 99.4 & 62.3 &81.72 & 0.0& 32.1\\
|
| 514 |
+
IGTR-AR*~\cite{du2024igtr} & 82.0 & 81.7 & \textbf{99.5} & 63.8 & 81.74 & 0.0 & 29.2 \\
|
| 515 |
+
SMTR*~\cite{du2024smtr} & 83.4 & 83.0 & 99.3 & 65.1 & 82.68 & 49.4 & 20.8 \\
|
| 516 |
+
\midrule
|
| 517 |
+
CRNN*~\cite{shi2017crnn} & 63.8 & 68.2 & 97.0 &
|
| 518 |
+
46.1 & 68.76 & 37.6 & 19.5 \\
|
| 519 |
+
|
| 520 |
+
SVTR-B*~\cite{duijcai2022svtr} & 77.9 & 78.7 & 99.2 &62.1 &79.49 & 22.9 &19.8 \\
|
| 521 |
+
\cellcolor[HTML]{EFEFEF}SVTRv2-T & \cellcolor[HTML]{EFEFEF}77.8 & \cellcolor[HTML]{EFEFEF}78.8 & \cellcolor[HTML]{EFEFEF}99.2 & \cellcolor[HTML]{EFEFEF}62.0 & \cellcolor[HTML]{EFEFEF}79.45 & \cellcolor[HTML]{EFEFEF}47.8 & \cellcolor[HTML]{EFEFEF}6.8\\
|
| 522 |
+
\cellcolor[HTML]{EFEFEF}SVTRv2-S & \cellcolor[HTML]{EFEFEF}81.1 & \cellcolor[HTML]{EFEFEF}81.2 & \cellcolor[HTML]{EFEFEF}99.3 & \cellcolor[HTML]{EFEFEF}65.0& \cellcolor[HTML]{EFEFEF}81.64 & \cellcolor[HTML]{EFEFEF}50.0 & \cellcolor[HTML]{EFEFEF}14.0\\
|
| 523 |
+
\cellcolor[HTML]{EFEFEF}SVTRv2-B & \cellcolor[HTML]{EFEFEF}83.5 & \cellcolor[HTML]{EFEFEF}\textbf{83.3} & \cellcolor[HTML]{EFEFEF}\textbf{99.5} & \cellcolor[HTML]{EFEFEF}\textbf{67.0} & \cellcolor[HTML]{EFEFEF}\textbf{83.31} & \cellcolor[HTML]{EFEFEF}\textbf{52.8} & \cellcolor[HTML]{EFEFEF}22.5 \\
|
| 524 |
+
\bottomrule
|
| 525 |
+
\end{tabular}}
|
| 526 |
+
\caption{Results on Chinese text dataset. * denotes that the model is retrained using the same setting as SVTRv2 (\textit{Sec.}~4.1).}
|
| 527 |
+
\label{tab:ch_all}
|
| 528 |
+
\end{table}
|
| 529 |
+
|
| 530 |
+
\begin{table}[t]\footnotesize
|
| 531 |
+
\centering
|
| 532 |
+
\setlength{\tabcolsep}{1.3pt}{
|
| 533 |
+
\begin{tabular}{r|cccccc|c|c|c|c}
|
| 534 |
+
\toprule
|
| 535 |
+
Method & \multicolumn{6}{c|}{Common Benchmarks (\textit{Com})} & Avg & \textit{OST} & \textit{Size} & \textit{FPS} \\
|
| 536 |
+
\midrule
|
| 537 |
+
E$^2$STR~\cite{Zhao_2024_CVPR_E2STR} & 99.2 & 98.6 & 98.7 & \textbf{93.8} & 96.7 & 99.3 & 97.71 & 80.7 & 211 & 7.86\\
|
| 538 |
+
VL-Reader~\cite{zhong_2024_acmmm_vlreader} & \textbf{99.6} & 99.1 & 98.7 & 92.6 & 97.5 & 99.3 & 97.80 & 86.2 & 142 & -\\
|
| 539 |
+
CLIP4STR~\cite{zhao_2025_tip_clip4str} & 99.4 & 98.6 & 98.3 & 90.8 & \textbf{97.8} & 99.0 & 97.32 & 82.8 & 158 & 14.1\\
|
| 540 |
+
DPTR~\cite{zhao_2024_acmmm_dptr} & 99.5 & \textbf{99.2} & 98.5 & 91.8 & 97.1 & 98.6 & 97.45 & - & 66.5 & 49.3 \\
|
| 541 |
+
IGTR~\cite{du2024igtr} & 99.2 & 98.3 & \textbf{98.8} & 92.0 & 96.8 & 99.0 & 97.34 & 86.5 & 24.1 & 58.3 \\
|
| 542 |
+
\midrule
|
| 543 |
+
SVTRv2-B & 99.2 & 98.6 & \textbf{98.8} & \textbf{93.8} & 97.2 & \textbf{99.4} & \textbf{97.83} & \textbf{86.9} & 19.8 & 143\\
|
| 544 |
+
|
| 545 |
+
|
| 546 |
+
\bottomrule
|
| 547 |
+
\end{tabular}}
|
| 548 |
+
\caption{Quantitative comparison of SVTRv2 with four advanced EDTRs that underwent large-scale vision-language pretraining. For fairness, SVTRv2 is fine-tuned on the \textit{Real} dataset to align with them.}
|
| 549 |
+
\label{tab:add_result}
|
| 550 |
+
\end{table}
|
| 551 |
+
|
| 552 |
+
|
| 553 |
+
In addition, recent EDTR advances, e.g., E$^2$STR~\cite{Zhao_2024_CVPR_E2STR}, VL-Reader~\cite{zhong_2024_acmmm_vlreader}, CLIP4STR~\cite{zhao_2025_tip_clip4str}, and DPTR~\cite{zhao_2024_acmmm_dptr}, achieve impressive accuracy through large-scale vision-language pretraining. To align with these methods, we conduct an experiment in which SVTRv2 is pretrained on synthetic datasets~\cite{Synthetic,jaderberg14synthetic} and fine-tuned on the \textit{Real} dataset~\cite{BautistaA22PARSeq}. The results in Tab.~\ref{tab:add_result} show that this pretraining significantly enhances SVTRv2's performance, allowing it to surpass the aforementioned models. Notably, SVTRv2 achieves the highest average accuracy on \textit{Com} (97.8\%) while also demonstrating superior generalization to \textit{OST} (86.9\%). Compared to CLIP4STR, SVTRv2 achieves these results with only 14\% of the parameters and runs 10$\times$ faster, highlighting its efficiency. These findings again validate the effectiveness of SVTRv2, as well as the proposed strategies and modules, i.e., MSR, FRM, and SGM.
|
| 554 |
+
|
| 555 |
+
|
| 556 |
+
\section{Conclusion}
|
| 557 |
+
|
| 558 |
+
In this paper, we have presented SVTRv2, an accurate and efficient CTC-based STR method. SVTRv2 is characterized by the MSR strategy and the FRM module, which tackle the challenge of irregular text, and by the SGM module, which endows the visual model with linguistic context. These upgrades maintain the simple inference architecture of CTC models, so SVTRv2 remains highly efficient. More importantly, our thorough validation on multiple benchmarks demonstrates the effectiveness of SVTRv2. It achieves leading accuracy in various challenging scenarios covering regular, irregular, occluded, Chinese, and long text, both with and without pretraining. In addition, we retrain 24 methods from scratch on \textit{U14M-Filter} without data leakage. Their results on \textit{U14M} constitute a comprehensive and reliable benchmark. We hope that SVTRv2 and this benchmark will further advance the development of the OCR community.
|
| 559 |
+
|
| 560 |
+
\noindent\textbf{Acknowledgement}
|
| 561 |
+
This work was supported by the National Natural Science Foundation of China (Nos. 62427819, 32341012, 62172103).
|
| 562 |
+
|
| 563 |
+
{
|
| 564 |
+
\small
|
| 565 |
+
\bibliographystyle{ieeenat_fullname}
|
| 566 |
+
\bibliography{main}
|
| 567 |
+
}
|
| 568 |
+
|
| 569 |
+
\input{X_suppl}
|
| 570 |
+
|
| 571 |
+
\end{document}
|
SVTRv2: CTC Beats Encoder-Decoder Models in Scene Text Recognition/preamble.tex
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
%
|
| 2 |
+
% --- inline annotations
|
| 3 |
+
%
|
| 4 |
+
\newcommand{\red}[1]{{\color{red}#1}}
|
| 5 |
+
\newcommand{\todo}[1]{{\color{red}#1}}
|
| 6 |
+
\newcommand{\TODO}[1]{\textbf{\color{red}[TODO: #1]}}
|
| 7 |
+
% --- disable by uncommenting
|
| 8 |
+
% \renewcommand{\TODO}[1]{}
|
| 9 |
+
% \renewcommand{\todo}[1]{#1}
|
| 10 |
+
|
SVTRv2: CTC Beats Encoder-Decoder Models in Scene Text Recognition/readme.txt
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
https://arxiv.org/abs/2411.15858
|
| 3 |
+
|
| 4 |
+
https://github.com/Topdu/OpenOCR
|
SVTRv2: CTC Beats Encoder-Decoder Models in Scene Text Recognition/svtrv2_ic15_6_h_fig.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:aadc5fda36ddd6a16508fb5b868b6d94e0030e4026ec2c6bafa9f20336443e1b
|
| 3 |
+
size 4091603
|
SVTRv2: CTC Beats Encoder-Decoder Models in Scene Text Recognition/svtrv2_noic15_6_h_fig1.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4812834e90cdafc4c70536ead8ff7b1204992d17bff979f30fb6ee5b43df11a8
|
| 3 |
+
size 3538413
|