From 205bf0b7eab7c81140148b2d6ecb809cc42fc856 Mon Sep 17 00:00:00 2001 From: GmailB Date: Tue, 29 Sep 2020 09:18:01 +0200 Subject: [PATCH] Update README.md (#7444) Hi, just corrected the example code, added 2 links and fixed some typos --- model_cards/unideeplearning/polibert_sa/README.md | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/model_cards/unideeplearning/polibert_sa/README.md b/model_cards/unideeplearning/polibert_sa/README.md index 4fe254637..7a71dc936 100644 --- a/model_cards/unideeplearning/polibert_sa/README.md +++ b/model_cards/unideeplearning/polibert_sa/README.md @@ -12,15 +12,22 @@ widget: ## Model description -This model performs sentiment analysis on Italian political twitter sentences. It was trained starting from an instance of "bert-base-italian-uncased-xxl" and fine-tuned on an Italian dataset of tweets. +This model performs sentiment analysis on Italian political twitter sentences. It was trained starting from an instance of "bert-base-italian-uncased-xxl" and fine-tuned on an Italian dataset of tweets. You can try it out at https://www.unideeplearning.com/twitter_sa/ (in Italian!) #### Hands-on ```python import torch from torch import nn +from transformers import AutoTokenizer, AutoModelForSequenceClassification -text = "Giueseppe Rossi è un pessimo politico" +tokenizer = AutoTokenizer.from_pretrained("unideeplearning/polibert_sa") +model = AutoModelForSequenceClassification.from_pretrained("unideeplearning/polibert_sa") + + + + +text = "Giuseppe Rossi è un pessimo politico" input_ids = tokenizer.encode(text, add_special_tokens=True, return_tensors= 'pt') logits, = model(input_ids) @@ -41,4 +48,6 @@ print(prob.argmax().tolist()) ## Acknowledgments Thanks to the support from: -the [Hugging Face](https://huggingface.co/), Unione Professionisti (https://www.unioneprofessionisti.com/) +the [Hugging Face](https://huggingface.co/), https://www.unioneprofessionisti.com + +https://www.unideeplearning.com/