# Download the transformer-based English spaCy pipeline.
# NOTE: "!" is an IPython shell magic — this file is a notebook export and
# only runs as-is inside Jupyter/Colab, not as a plain Python script.
!python -m spacy download en_core_web_trf

import spacy

# Load the pipeline and run named-entity recognition on a sample sentence.
nlp = spacy.load('en_core_web_trf')
doc = nlp("Jeff Bezos is the CEO of Amazon. He went to university and was not born in Madeupville")

# Pair each detected entity span with its predicted label string
# (bare expression: displayed as cell output in a notebook).
[(ent,ent.label_) for ent in doc.ents]

# Pin numpy to 1.20.0 — the first release that provides np.broadcast_shapes,
# which is used below. ("!" shell magic: Jupyter/Colab only; a kernel restart
# may be needed before the new version is importable.)
!pip install numpy==1.20.0

import numpy as np

# Confirm which numpy version is actually loaded in this kernel.
print(np.__version__)

# Broadcasting demo: (1, 2) and (3, 2, 1) are compatible — aligning shapes
# from the right, every axis pair is equal or contains a 1, so the sum
# broadcasts to shape (3, 2, 2).
lhs = np.zeros((1, 2))
rhs = np.zeros((3, 2, 1))
arr = lhs + rhs
arr  # bare expression: notebook display of the broadcast result

# np.broadcast_shapes (new in numpy 1.20) reports the same result shape
# without allocating any arrays.
np.broadcast_shapes((1, 2), (3, 2, 1))

from transformers import pipeline

# Off-the-shelf zero-shot classification: the pipeline scores how well the
# text "entails" each candidate label, with no task-specific fine-tuning.
classifier = pipeline("zero-shot-classification")

sequence_to_classify = "one day I will see the world"
candidate_labels = ["travel", "cooking", "dancing"]

# Bare call: the returned dict of labels/scores is shown as cell output.
classifier(sequence_to_classify, candidate_labels)

from transformers import AutoModelForSequenceClassification, AutoTokenizer
import torch

# Manual zero-shot classification via an NLI model: the input text is the
# premise and a candidate label is phrased as a hypothesis sentence.
nli_model = AutoModelForSequenceClassification.from_pretrained('joeddav/xlm-roberta-large-xnli')
tokenizer = AutoTokenizer.from_pretrained('joeddav/xlm-roberta-large-xnli')

# Fix: `device` was previously undefined, and the model was never moved to
# the device the inputs were sent to.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
nli_model = nli_model.to(device)

# Fix: `sequence` and `label` were previously undefined (NameError) — reuse
# the text and first candidate label defined in the pipeline cell above.
premise = sequence_to_classify
label = candidate_labels[0]
hypothesis = f'This example is {label}.'

# run through model pre-trained on MNLI
# Fix: `truncation_strategy` is a deprecated kwarg ignored by modern
# tokenizers; `truncation='only_first'` truncates only the premise when the
# pair exceeds the model's max length.
x = tokenizer.encode(premise, hypothesis, return_tensors='pt',
                     truncation='only_first')
logits = nli_model(x.to(device))[0]

# we throw away "neutral" (dim 1) and take the probability of
# "entailment" (2) as the probability of the label being true
entail_contradiction_logits = logits[:, [0, 2]]
probs = entail_contradiction_logits.softmax(dim=1)
prob_label_is_true = probs[:, 1]