---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- banking77
metrics:
- accuracy
- f1
widget:
- text: Could you assist me in finding my lost card?
example_title: Example 1
- text: I found my lost card. Am I still able to use it?
example_title: Example 2
- text: Hey, I thought my topup was all done but now the money is gone again – what’s
up with that?
example_title: Example 3
- text: Tell me why my topup wouldn’t go through?
example_title: Example 4
base_model: distilbert-base-uncased
model-index:
- name: distilbert-base-uncased-finetuned-banking77
results:
- task:
type: text-classification
name: Text Classification
dataset:
name: banking77
type: banking77
args: default
metrics:
- type: accuracy
value: 0.925
name: Accuracy
- type: f1
value: 0.925018570680639
name: F1
- task:
type: text-classification
name: Text Classification
dataset:
name: banking77
type: banking77
config: default
split: test
metrics:
- type: accuracy
value: 0.925
name: Accuracy
verified: true
verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiZDllOGYzMjM1MDlmMzc1MjFhNDAxZGRkOTI2Y2M3MTExNWViZjIxMWMyMzcyMzE2YzA2M2U1ODBmMDk4ZTc3ZiIsInZlcnNpb24iOjF9.m4W8jIWPuO_PgIo9V1K3I3DhHQ4ZbV1CDK3NOUhQS4xgrFE7DzFkPoku9tIhID4HfVwh9CdlTFBV5DNk-YoCCw
- type: precision
value: 0.9282769473964405
name: Precision Macro
verified: true
verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiMTNjNDEzMTY4ZjYyZDZmNmZhNDgxYzZhMmU2NDk1OTQ2ODQ3MDllZWU3ZjY4ZGJkNGQ0MTcxZDAxMjBiNzdmMSIsInZlcnNpb24iOjF9.yH8l0iirnQlO4VAlXR1mUxSJ68N_ro8f4v_56tMidva8dixFPPpQ4_zWQ9br6iBVH-pMHFrPL2vSsRldxRjyAw
- type: precision
value: 0.925
name: Precision Micro
verified: true
verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiNzYwOTExZDI0ZGVhM2E5NzA0YTFhYjQ3NThlZWQ3ZTg2YTVjYjZhMzU1MDI3ZjkyY2NiNDBlYTAzYzYwYjdmMyIsInZlcnNpb24iOjF9.Qj0ni7-zG991npWW9NTutH1qUkLLZnJ13TxWZynxfef4VBKpaC9Ar-4Z8NSgrlNvghndvVHvvYQ47zoUnKYfCg
- type: precision
value: 0.9282769473964405
name: Precision Weighted
verified: true
verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiOWU4NmZlYmYyMTI5OTIwMjAwNDk4OTQ3NGYzYzk0MzgyYmFiZTUyMzEzOGVjMTA2MzJhZGI1NzdjOWU5N2I5ZiIsInZlcnNpb24iOjF9.YTmQFFjzy2387Puvwnj_BtYLNXtEykUlHSvl027TfnpYyjCmsixU3r5yZjkI4ZUy6W5Xy5t9bluhZupmyGx5CA
- type: recall
value: 0.9250000000000002
name: Recall Macro
verified: true
verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiZmQ2ZWFlN2NkN2IzMGE4NzBhMDI2MTk3OGYxMmMxNDNmMzhiMmU3NTAzYmFiNTMyMDg0OTczZGYyNTE2NDc5MiIsInZlcnNpb24iOjF9.lJbWAB8po1DgJO-Or_C-b7F5jp_IMMPhxcUqyJEbT5Au94s6EgIlFHzN7eK9tCC2c-AvCcs-hpItnQjUqqE1AQ
- type: recall
value: 0.925
name: Recall Micro
verified: true
verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiM2Y0ZGVlN2M0YjQ5NzcxMTJkNmVhNjc3ZjI1OTQyOTQ2MmExNDFhODVjM2VmMGM0NGNlY2U1ZDYwMjU0MmZjZCIsInZlcnNpb24iOjF9.Gqm7KCYZpm97nE4J-YazB3cWIVwCmRWrRoAY6Yrc3YP4GqVpSr6isfH53CtZ6ka5byohMeFb8_XbIYW3xRUCCA
- type: recall
value: 0.925
name: Recall Weighted
verified: true
verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiNGExMGRjNTU5ZGRiMThkMTAzZjA0ODkzZDJiNzMxMTZjYmJkNjJkZjk0YzY4MTA4MjFmNjk0Y2IzNjkxZTJiMyIsInZlcnNpb24iOjF9.GaYizK6iJY3zcgrkMEAOrTanjjYTPOEO8hOoe66lBQ7J5DIIFQBDhY89qOYFjuZnypgT7_s7B499Lx_XJiMNCg
- type: f1
value: 0.9250185706806391
name: F1 Macro
verified: true
verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiMzc1NGIxYmRkZTY1MGM0NTQzNTU0ZjEwMDdjNjIxYmYxYTViZWY0MGNkMWMyZjBjYjNlMDNlY2UyNTg4ZmE4OCIsInZlcnNpb24iOjF9.06bcSulLP3e22fwjcvqEjr_5Pa1kzMSsKiw_qI07tfXqtigw7RGKW80RtfssaSHbfRmdmnGkqFI7UIO56q1UCQ
- type: f1
value: 0.925
name: F1 Micro
verified: true
verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiYTg2NmU2MzFmN2NmMDA5NDg3NzQ1ZWEyZGU2ZDdmZDU5ZTcxOTYwN2RkZWM0OTYyY2FlNGRmMzBmODYxZDMxMSIsInZlcnNpb24iOjF9.gLeGhILsq8eP65ORrWpeA_hNz0zuqBmI-r5g7HR-qZdDyMyEEZrBlFqSDGcSRdqCvjMS7zRXksHSa6Y8QzKPCA
- type: f1
value: 0.925018570680639
name: F1 Weighted
verified: true
verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiNTMyNzVlZjgzZDZhOTUzZjZjYjg1OTU1NjcxMWQ1NDIxM2M2MzcwZmQ2YzkyNjc3ZjRlNjYwMTY0NDZmNDQxMCIsInZlcnNpb24iOjF9.rfqfrjWGp0_PC7MfUfl53YAwdN5FOZWzf5JF7wM9URSbhWHtWeh170xf9m1N3pCMseE5-66KndSyCZttt8C7Cg
- type: loss
value: 0.2934279143810272
name: loss
verified: true
verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiY2UyY2U2MGM2YWFmNDA0MTUyMTg1MzdkZjRiNmI0NTVlMGJiNmViNDE1ZDc0YThlYjBhNWJlMTM0NzQ0NjkxYSIsInZlcnNpb24iOjF9.SuSSft3vlKA8Yx5vXP9bLRSaIa0D7pGtMHHYXgAS33baC82iTHyD8H41NQFk1b3B0dMHHLC458yH4gEhgBdzBA
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert-base-uncased-finetuned-banking77
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the banking77 dataset.
It achieves the following results on the evaluation set:
- Loss: 0.2935
- Accuracy: 0.925
- F1: 0.9250
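A minimal inference sketch using the Transformers `pipeline` API; the model id below is a placeholder for wherever this checkpoint is hosted on the Hub:

```python
from transformers import pipeline

# Placeholder repo id; replace with the actual Hub path of this checkpoint.
classifier = pipeline(
    "text-classification",
    model="distilbert-base-uncased-finetuned-banking77",
)

print(classifier("Tell me why my topup wouldn't go through?"))
# -> [{'label': '<one of the 77 banking77 intents>', 'score': ...}]
```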
## Model description
This is [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) fine-tuned as a single-label text classifier over the 77 intent classes of the banking77 dataset. Because the base model is a distilled version of BERT, the fine-tuned classifier keeps DistilBERT's smaller footprint and faster inference while adding a classification head for intent prediction.
## Intended uses & limitations
The model is intended for classifying short English customer-support queries from the online-banking domain into one of the 77 banking77 intents. It is domain-specific: queries outside banking, in other languages, or phrased very differently from the training data may be misclassified, and the model inherits any biases present in the base model and the dataset.
## Training and evaluation data
The model was fine-tuned and evaluated on the [banking77](https://huggingface.co/datasets/banking77) dataset, a collection of online banking customer-service queries labelled with 77 fine-grained intents; see the loading sketch below.
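A minimal sketch for inspecting the dataset with the Datasets library (assumes the public `banking77` dataset on the Hub):

```python
from datasets import load_dataset

# banking77: online banking queries labelled with 77 fine-grained intents
dataset = load_dataset("banking77")

print(dataset)              # DatasetDict with train/test splits
print(dataset["train"][0])  # {'text': '...', 'label': <int intent id>}
```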
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training (a sketch mapping them onto `TrainingArguments` follows the list):
- learning_rate: 9.686210354742596e-05
- train_batch_size: 64
- eval_batch_size: 32
- seed: 40
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5
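A minimal sketch of how the values above map onto `TrainingArguments`; this is not the original training script, and `output_dir` and `evaluation_strategy` are assumptions:

```python
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="distilbert-base-uncased-finetuned-banking77",  # assumed output path
    learning_rate=9.686210354742596e-05,
    per_device_train_batch_size=64,
    per_device_eval_batch_size=32,
    seed=40,
    lr_scheduler_type="linear",
    num_train_epochs=5,
    evaluation_strategy="epoch",  # assumption: per-epoch eval, consistent with the results table
    # Adam betas/epsilon are left at their defaults (0.9, 0.999, 1e-8), matching the optimizer above.
)
```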
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |
|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|
| No log | 1.0 | 126 | 1.1457 | 0.7896 | 0.7685 |
| No log | 2.0 | 252 | 0.4673 | 0.8906 | 0.8889 |
| No log | 3.0 | 378 | 0.3488 | 0.9150 | 0.9151 |
| 0.9787 | 4.0 | 504 | 0.3238 | 0.9180 | 0.9179 |
| 0.9787 | 5.0 | 630 | 0.3126 | 0.9225 | 0.9226 |
### Framework versions
- Transformers 4.17.0
- Pytorch 1.11.0
- Datasets 2.0.0
- Tokenizers 0.11.6