faisalraza commited on
Commit
c4745ff
1 Parent(s): d9855f7

initial commit

Browse files
README.md ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ language: en
3
+ license: cc-by-nc-sa-4.0
4
+ pipeline_tag: document-question-answering
5
+ tags:
6
+ - layoutlm
7
+ - document-question-answering
8
+ - pdf
9
+ - invoices
10
+ widget:
11
+ - text: "What is the invoice number?"
12
+ src: "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
13
+ - text: "What is the purchase amount?"
14
+ src: "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/contract.jpeg"
15
+ ---
16
+
17
+ # LayoutLM for Invoices
18
+
19
+ This is a fine-tuned version of the multi-modal [LayoutLM](https://aka.ms/layoutlm) model for the task of question answering on invoices and other documents. It has been fine-tuned on a proprietary dataset of
20
+ invoices as well as both [SQuAD2.0](https://huggingface.co/datasets/squad_v2) and [DocVQA](https://www.docvqa.org/) for general comprehension.
21
+
22
+ ## Non-consecutive tokens
23
+
24
+ Unlike other QA models, which can only extract consecutive tokens (because they predict the start and end of a sequence), this model can predict longer-range, non-consecutive sequences with an additional
25
+ classifier head. For example, QA models often encounter this failure mode:
26
+
27
+ ### Before
28
+
29
+ ![Broken Address](./before.png)
30
+
31
+
32
+ ### After
33
+
34
 + However, this model is able to predict non-consecutive tokens and can therefore extract the address correctly:
35
+
36
+ ![Two-line Address](./after.png)
37
+
38
+ ## Getting started with the model
39
+
40
+ The best way to use this model is via [DocQuery](https://github.com/impira/docquery).
41
+
42
+ ## About us
43
+
44
+ This model was created by the team at [Impira](https://www.impira.com/).
after.png ADDED
before.png ADDED
config.json ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "impira/layoutlm-invoices",
3
+ "architectures": [
4
+ "LayoutLMForQuestionAnswering"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.1,
7
+ "bos_token_id": 0,
8
+ "classifier_dropout": null,
9
+ "eos_token_id": 2,
10
+ "gradient_checkpointing": false,
11
+ "hidden_act": "gelu",
12
+ "hidden_dropout_prob": 0.1,
13
+ "hidden_size": 768,
14
+ "initializer_range": 0.02,
15
+ "intermediate_size": 3072,
16
+ "layer_norm_eps": 1e-05,
17
+ "max_2d_position_embeddings": 1024,
18
+ "max_position_embeddings": 514,
19
+ "model_type": "layoutlm",
20
+ "num_attention_heads": 12,
21
+ "num_hidden_layers": 12,
22
+ "pad_token_id": 1,
23
+ "position_embedding_type": "absolute",
24
+ "tokenizer_class": "RobertaTokenizer",
25
+ "token_classification": true,
26
+ "token_classifier_reduction": "sum",
27
+ "token_classifier_constant": 0.1,
28
+ "transformers_version": "4.22.0.dev0",
29
+ "type_vocab_size": 1,
30
+ "use_cache": true,
31
+ "vocab_size": 50265
32
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pyproject.toml ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ [tool.black]
2
+ line-length = 119
3
+ target-version = ['py35']
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bacb21323970333f86ca1768bc8bd925471e8495521eae181cdff984c11b94f3
3
+ size 511263905
setup.cfg ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [isort]
2
+ default_section = FIRSTPARTY
3
+ ensure_newline_before_comments = True
4
+ force_grid_wrap = 0
5
+ include_trailing_comma = True
6
+ known_first_party = transformers
7
+
8
+ line_length = 119
9
+ lines_after_imports = 2
10
+ multi_line_output = 3
11
+ use_parentheses = True
12
+
13
+ [flake8]
14
+ ignore = E203, E501, E741, W503, W605
15
+ max-line-length = 119
16
+
17
+ [tool:pytest]
18
+ doctest_optionflags=NUMBER NORMALIZE_WHITESPACE ELLIPSIS
special_tokens_map.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": false}}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "add_prefix_space": false, "errors": "replace", "sep_token": "</s>", "cls_token": "<s>", "pad_token": "<pad>", "mask_token": "<mask>", "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "roberta-base", "add_prefix_space": true}
vocab.json ADDED
The diff for this file is too large to render. See raw diff