Skip to content

Commit

Permalink
Update models URIs in notebook (openai#52)
Browse files · Browse the repository at this point in the history
  • Loading branch information
syvb authored Mar 8, 2021
1 parent fd6c144 commit beba48f
Showing 1 changed file with 26 additions and 23 deletions.
49 changes: 26 additions & 23 deletions notebooks/Interacting_with_CLIP.ipynb
Original file line number · Diff line number · Diff line change
Expand Up @@ -60,7 +60,7 @@
"else:\n",
" torch_version_suffix = \"+cu110\""
],
"execution_count": 1,
"execution_count": null,
"outputs": [
{
"output_type": "stream",
Expand All @@ -83,7 +83,7 @@
"source": [
"! pip install torch==1.7.1{torch_version_suffix} torchvision==0.8.2{torch_version_suffix} -f https://download.pytorch.org/whl/torch_stable.html ftfy regex"
],
"execution_count": 2,
"execution_count": null,
"outputs": [
{
"output_type": "stream",
Expand Down Expand Up @@ -118,7 +118,7 @@
"\n",
"print(\"Torch version:\", torch.__version__)"
],
"execution_count": 3,
"execution_count": null,
"outputs": [
{
"output_type": "stream",
Expand Down Expand Up @@ -147,10 +147,13 @@
},
"source": [
"MODELS = {\n",
" \"ViT-B/32\": \"https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt\",\n",
" \"RN50\": \"https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt\",\n",
" \"RN101\": \"https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt\",\n",
" \"RN50x4\": \"https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt\",\n",
" \"ViT-B/32\": \"https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt\", \n",
"}"
],
"execution_count": 4,
"execution_count": null,
"outputs": []
},
{
Expand All @@ -165,7 +168,7 @@
"source": [
"! wget {MODELS[\"ViT-B/32\"]} -O model.pt"
],
"execution_count": 5,
"execution_count": null,
"outputs": [
{
"output_type": "stream",
Expand Down Expand Up @@ -206,7 +209,7 @@
"print(\"Context length:\", context_length)\n",
"print(\"Vocab size:\", vocab_size)"
],
"execution_count": 6,
"execution_count": null,
"outputs": [
{
"output_type": "stream",
Expand Down Expand Up @@ -250,7 +253,7 @@
"image_mean = torch.tensor([0.48145466, 0.4578275, 0.40821073]).cuda()\n",
"image_std = torch.tensor([0.26862954, 0.26130258, 0.27577711]).cuda()"
],
"execution_count": 7,
"execution_count": null,
"outputs": []
},
{
Expand All @@ -277,7 +280,7 @@
"! pip install ftfy regex\n",
"! wget https://openaipublic.azureedge.net/clip/bpe_simple_vocab_16e6.txt.gz -O bpe_simple_vocab_16e6.txt.gz"
],
"execution_count": 8,
"execution_count": null,
"outputs": [
{
"output_type": "stream",
Expand Down Expand Up @@ -438,7 +441,7 @@
" text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=\"replace\").replace('</w>', ' ')\n",
" return text\n"
],
"execution_count": 9,
"execution_count": null,
"outputs": []
},
{
Expand Down Expand Up @@ -485,7 +488,7 @@
" \"coffee\": \"a cup of coffee on a saucer\"\n",
"}"
],
"execution_count": 10,
"execution_count": null,
"outputs": []
},
{
Expand Down Expand Up @@ -520,7 +523,7 @@
"\n",
"plt.tight_layout()\n"
],
"execution_count": 11,
"execution_count": null,
"outputs": [
{
"output_type": "display_data",
Expand Down Expand Up @@ -561,7 +564,7 @@
"image_input -= image_mean[:, None, None]\n",
"image_input /= image_std[:, None, None]"
],
"execution_count": 12,
"execution_count": null,
"outputs": []
},
{
Expand All @@ -573,7 +576,7 @@
"tokenizer = SimpleTokenizer()\n",
"text_tokens = [tokenizer.encode(\"This is \" + desc) for desc in texts]"
],
"execution_count": 13,
"execution_count": null,
"outputs": []
},
{
Expand All @@ -592,7 +595,7 @@
"\n",
"text_input = text_input.cuda()"
],
"execution_count": 14,
"execution_count": null,
"outputs": []
},
{
Expand All @@ -605,7 +608,7 @@
" image_features = model.encode_image(image_input).float()\n",
" text_features = model.encode_text(text_input).float()"
],
"execution_count": 15,
"execution_count": null,
"outputs": []
},
{
Expand All @@ -629,7 +632,7 @@
"text_features /= text_features.norm(dim=-1, keepdim=True)\n",
"similarity = text_features.cpu().numpy() @ image_features.cpu().numpy().T"
],
"execution_count": 16,
"execution_count": null,
"outputs": []
},
{
Expand Down Expand Up @@ -664,7 +667,7 @@
"\n",
"plt.title(\"Cosine similarity between text and image features\", size=20)"
],
"execution_count": 17,
"execution_count": null,
"outputs": [
{
"output_type": "execute_result",
Expand Down Expand Up @@ -722,7 +725,7 @@
"\n",
"cifar100 = CIFAR100(os.path.expanduser(\"~/.cache\"), transform=preprocess, download=True)"
],
"execution_count": 18,
"execution_count": null,
"outputs": [
{
"output_type": "stream",
Expand Down Expand Up @@ -753,7 +756,7 @@
"text_input = text_input.cuda()\n",
"text_input.shape"
],
"execution_count": 19,
"execution_count": null,
"outputs": [
{
"output_type": "execute_result",
Expand Down Expand Up @@ -782,7 +785,7 @@
"text_probs = (100.0 * image_features @ text_features.T).softmax(dim=-1)\n",
"top_probs, top_labels = text_probs.cpu().topk(5, dim=-1)"
],
"execution_count": 20,
"execution_count": null,
"outputs": []
},
{
Expand Down Expand Up @@ -815,7 +818,7 @@
"plt.subplots_adjust(wspace=0.5)\n",
"plt.show()"
],
"execution_count": 21,
"execution_count": null,
"outputs": [
{
"output_type": "display_data",
Expand Down Expand Up @@ -844,7 +847,7 @@
"source": [
""
],
"execution_count": 21,
"execution_count": null,
"outputs": []
}
]
Expand Down

0 comments on commit beba48f

Please sign in to comment.