[DocTests] Fix some doc tests (#16889)

* [DocTests] Fix some doc tests

* hacky fix

* correct
This commit is contained in:
Patrick von Platen 2022-04-23 08:40:14 +02:00 committed by GitHub
parent 22fc93c4d9
commit 72728be3db
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
3 changed files with 7 additions and 8 deletions

View file

@@ -252,10 +252,9 @@ The example above only shows a single example. You can also do batched inference
>>> model = T5ForConditionalGeneration.from_pretrained("t5-small")
>>> task_prefix = "translate English to German: "
>>> sentences = [
... "The house is wonderful.",
... "I like to work in NYC.",
>>> ] # use different length sentences to test batching
>>> # use different length sentences to test batching
>>> sentences = ["The house is wonderful.", "I like to work in NYC."]
>>> inputs = tokenizer([task_prefix + sentence for sentence in sentences], return_tensors="pt", padding=True)
>>> output_sequences = model.generate(

View file

@@ -1210,14 +1210,14 @@ class BeitForSemanticSegmentation(BeitPreTrainedModel):
Examples:
```python
>>> from transformers import BeitFeatureExtractor, BeitForSemanticSegmentation
>>> from transformers import AutoFeatureExtractor, BeitForSemanticSegmentation
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> feature_extractor = BeitFeatureExtractor.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
>>> feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
>>> model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
>>> inputs = feature_extractor(images=image, return_tensors="pt")

View file

@@ -1140,14 +1140,14 @@ class Data2VecVisionForSemanticSegmentation(Data2VecVisionPreTrainedModel):
Examples:
```python
>>> from transformers import Data2VecVisionFeatureExtractor, Data2VecVisionForSemanticSegmentation
>>> from transformers import AutoFeatureExtractor, Data2VecVisionForSemanticSegmentation
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> feature_extractor = Data2VecVisionFeatureExtractor.from_pretrained("facebook/data2vec-vision-base")
>>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/data2vec-vision-base")
>>> model = Data2VecVisionForSemanticSegmentation.from_pretrained("facebook/data2vec-vision-base")
>>> inputs = feature_extractor(images=image, return_tensors="pt")