lordpotato committed
Commit 9f5df1c · 1 Parent(s): 5006573

initial commit, app.py not built

Image_Captioning_Project.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
app.py ADDED
@@ -0,0 +1,65 @@
+ # prompt: create gradio app to load the model and run it
+
+ import gradio as gr
+ import tensorflow as tf
+ import numpy as np
+ import requests
+ from tensorflow.keras.preprocessing.image import img_to_array, load_img
+ from tensorflow.keras.applications.inception_v3 import preprocess_input
+ import re
+
+ # Load the model
+ model = tf.keras.models.load_model('caption_model.h5')
+
+ # Load tokenizer (you'll need to adapt this to your actual tokenizer loading)
+ # Replace with your actual tokenizer loading
+ # Example using pickle
+ import pickle
+ with open('tokenizer.pickle', 'rb') as handle:
+     tokenizer = pickle.load(handle)
+
+ vocab_size = len(tokenizer.word_index) + 1
+ max_caption_length = 34  # Replace with your actual max_caption_length
+ cnn_output_dim = 2048
+
+
+ def preprocess_image(image_path):
+     img = load_img(image_path, target_size=(299, 299))  # InceptionV3 expects 299x299 input
+     img = img_to_array(img)
+     img = np.expand_dims(img, axis=0)
+     img = preprocess_input(img)
+     return img
+
+ def greedy_generator(image_features):
+     in_text = 'start '  # greedy decoding: grow the caption one most-likely word at a time
+     for _ in range(max_caption_length):
+         sequence = tokenizer.texts_to_sequences([in_text])[0]
+         sequence = tf.keras.preprocessing.sequence.pad_sequences([sequence], maxlen=max_caption_length).reshape((1, max_caption_length))
+         prediction = model.predict([image_features.reshape(1, cnn_output_dim), sequence], verbose=0)
+         idx = np.argmax(prediction)
+         word = tokenizer.index_word.get(idx, 'end')  # treat an unknown/padding index as end-of-caption
+         in_text += ' ' + word
+         if word == 'end':
+             break
+     in_text = in_text.replace('start ', '')
+     in_text = in_text.replace(' end', '')
+     return in_text
+
+
+ def predict(image):
+     processed_image = preprocess_image(image)
+     image_features = model.layers[2].predict(processed_image, verbose=0)  # assuming model.layers[2] is the InceptionV3 feature extractor
+     image_features = image_features.flatten()
+
+     caption = greedy_generator(image_features)
+     return caption
+
+ iface = gr.Interface(
+     fn=predict,
+     inputs=gr.Image(type="filepath"),
+     outputs="text",
+     title="Image Captioning",
+     description="Upload an image and get a caption!"
+ )
+
+ iface.launch()
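
Note: app.py loads a tokenizer.pickle that is not part of this commit, and its own comments say the loading step needs to be adapted. Below is a minimal sketch, assuming the tokenizer is a tf.keras Tokenizer fitted at training time on captions wrapped in 'start'/'end' tokens; train_captions is a placeholder name, not something defined in this repo.

# Hypothetical sketch: producing a tokenizer.pickle compatible with app.py.
# `train_captions` stands in for the real training caption list; the
# 'start'/'end' wrapping mirrors what greedy_generator expects.
import pickle

from tensorflow.keras.preprocessing.text import Tokenizer

train_captions = [
    "start a dog runs across the grass end",      # placeholder examples
    "start two children play on a beach end",
]

tokenizer = Tokenizer()
tokenizer.fit_on_texts(train_captions)

# max_caption_length (34 in app.py) would come from the longest training sequence
max_caption_length = max(len(c.split()) for c in train_captions)

with open("tokenizer.pickle", "wb") as handle:
    pickle.dump(tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)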
caption_model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2cde03932fe1d948942415aa2cc0574bca844ea7a73f03b2f604189d3453d825
+ size 66378520
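
Note: caption_model.h5 is committed as a Git LFS pointer, so a fresh clone contains only this small text stub until the LFS object (66378520 bytes) is fetched, e.g. with git lfs pull. A minimal, hypothetical sanity check that could run before app.py calls load_model:

# Hypothetical check: confirm caption_model.h5 is the fetched LFS object,
# not the small text pointer stub left behind when LFS content is missing.
import os

EXPECTED_SIZE = 66378520  # from the LFS pointer in this commit

actual_size = os.path.getsize("caption_model.h5")
if actual_size != EXPECTED_SIZE:
    raise RuntimeError(
        f"caption_model.h5 is {actual_size} bytes, expected {EXPECTED_SIZE}; "
        "the Git LFS object probably has not been fetched yet."
    )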
image-caption.ipynb ADDED
The diff for this file is too large to render. See raw diff