master e432e2a9c564 cached
7 files
11.7 KB
3.4k tokens
6 symbols
1 request
Download .txt
Repository: jerrytigerxu/Simple-Python-Chatbot
Branch: master
Commit: e432e2a9c564
Files: 7
Total size: 11.7 KB

Directory structure:
gitextract_jv4jn7af/

├── README.md
├── chatbot_model.h5
├── chatgui.py
├── classes.pkl
├── intents.json
├── train_chatbot.py
└── words.pkl

================================================
FILE CONTENTS
================================================

================================================
FILE: README.md
================================================
# Simple-Python-Chatbot

Creating a simple Python chatbot using natural language processing and deep learning.



================================================
FILE: chatgui.py
================================================

# Module-level setup for the chat GUI.
# NOTE: all four artifacts below are loaded at import time with relative
# paths, so this script must be run from the repository root (the directory
# produced by train_chatbot.py).
import nltk
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
import pickle
import numpy as np

from keras.models import load_model
# Trained intent classifier produced by train_chatbot.py.
model = load_model('chatbot_model.h5')
import json
import random
# Intent definitions: tags, example patterns, and canned responses.
intents = json.loads(open('intents.json').read())
# Vocabulary (sorted, lemmatized words) and class labels pickled by training.
words = pickle.load(open('words.pkl','rb'))
classes = pickle.load(open('classes.pkl','rb'))


def clean_up_sentence(sentence):
    """Tokenize *sentence* and return its lower-cased, lemmatized tokens."""
    tokens = nltk.word_tokenize(sentence)
    return [lemmatizer.lemmatize(token.lower()) for token in tokens]

# return bag of words array: 0 or 1 for each word in the bag that exists in the sentence

def bow(sentence, words, show_details=True):
    """Return a bag-of-words vector for *sentence* over the vocabulary *words*.

    Each position i of the result is 1 if words[i] occurs (lemmatized,
    lower-cased) in the sentence, else 0.

    Parameters:
        sentence: raw user input string.
        words: the vocabulary list (sorted, duplicate-free — see training).
        show_details: when True, print each vocabulary hit (debug aid).

    Returns:
        np.ndarray of 0/1 ints with length == len(words).
    """
    sentence_words = clean_up_sentence(sentence)
    bag = [0] * len(words)
    # Map each vocabulary word to its position once, instead of scanning the
    # whole vocabulary for every sentence word (O(n*m) -> O(n+m)).  The
    # vocabulary is built as sorted(set(...)) at training time, so there are
    # no duplicate entries for the dict to collapse.
    index = {w: i for i, w in enumerate(words)}
    for s in sentence_words:
        i = index.get(s)
        if i is not None:
            bag[i] = 1
            if show_details:
                print("found in bag: %s" % s)
    return np.array(bag)

def predict_class(sentence, model):
    """Classify *sentence* with *model* and return candidate intents.

    Returns a list of {"intent": tag, "probability": str} dicts, sorted by
    descending probability, keeping only predictions above the threshold.
    """
    ERROR_THRESHOLD = 0.25
    features = bow(sentence, words, show_details=False)
    probabilities = model.predict(np.array([features]))[0]
    # Keep (index, probability) pairs that clear the threshold.
    candidates = [[i, p] for i, p in enumerate(probabilities) if p > ERROR_THRESHOLD]
    candidates.sort(key=lambda pair: pair[1], reverse=True)
    return [{"intent": classes[i], "probability": str(p)} for i, p in candidates]

def getResponse(ints, intents_json):
    """Pick a random canned response for the top-ranked predicted intent.

    Parameters:
        ints: non-empty list of {"intent": tag, ...} dicts, best first
              (as produced by predict_class).
        intents_json: the parsed intents.json structure.

    Returns:
        A response string for the matching tag, or a fallback message when
        the tag is not present in intents_json.
    """
    tag = ints[0]['intent']
    for intent in intents_json['intents']:
        if intent['tag'] == tag:
            return random.choice(intent['responses'])
    # BUG FIX: the original left 'result' unbound (NameError) when the
    # predicted tag had no entry in intents_json; return a safe default.
    return "Sorry, I don't understand."

def chatbot_response(msg):
    """Full pipeline: classify *msg*, then pick a response for the top intent."""
    return getResponse(predict_class(msg, model), intents)


#Creating GUI with tkinter
import tkinter
from tkinter import *


def send():
    """Send-button handler: echo the user's message, append the bot's reply."""
    user_text = EntryBox.get("1.0", 'end-1c').strip()
    EntryBox.delete("0.0", END)

    # Ignore empty submissions.
    if user_text == '':
        return

    ChatLog.config(state=NORMAL)
    ChatLog.insert(END, "You: " + user_text + '\n\n')
    ChatLog.config(foreground="#442265", font=("Verdana", 12))

    reply = chatbot_response(user_text)
    ChatLog.insert(END, "Bot: " + reply + '\n\n')

    # Lock the log against manual edits and scroll to the newest message.
    ChatLog.config(state=DISABLED)
    ChatLog.yview(END)


# --- Build and run the tkinter UI (runs at import; blocks in mainloop) ---
base = Tk()
base.title("Hello")
base.geometry("400x500")
# Fixed-size window: widgets below are absolutely positioned with .place().
base.resizable(width=FALSE, height=FALSE)

# Read-only conversation log.
ChatLog = Text(base, bd=0, bg="white", height="8", width="50", font="Arial",)

ChatLog.config(state=DISABLED)

# Scrollbar bound to the chat log.
scrollbar = Scrollbar(base, command=ChatLog.yview, cursor="heart")
ChatLog['yscrollcommand'] = scrollbar.set

# Button that dispatches the message via send().
SendButton = Button(base, font=("Verdana",12,'bold'), text="Send", width="12", height=5,
                    bd=0, bg="#32de97", activebackground="#3c9d9b",fg='#ffffff',
                    command= send )

# Multi-line input box for the user's message.
EntryBox = Text(base, bd=0, bg="white",width="29", height="5", font="Arial")
#EntryBox.bind("<Return>", send)


# Absolute layout: log + scrollbar on top, entry box + button on the bottom row.
scrollbar.place(x=376,y=6, height=386)
ChatLog.place(x=6,y=6, height=386, width=370)
EntryBox.place(x=128, y=401, height=90, width=265)
SendButton.place(x=6, y=401, height=90)

base.mainloop()


================================================
FILE: classes.pkl
================================================
(lp0
Vadverse_drug
p1
aVblood_pressure
p2
aVblood_pressure_search
p3
aVgoodbye
p4
aVgreeting
p5
aVhospital_search
p6
aVoptions
p7
aVpharmacy_search
p8
aVthanks
p9
a.

================================================
FILE: intents.json
================================================
{"intents": [
        {"tag": "greeting",
         "patterns": ["Hi there", "How are you", "Is anyone there?","Hey","Hola", "Hello", "Good day"],
         "responses": ["Hello, thanks for asking", "Good to see you again", "Hi there, how can I help?"],
         "context": [""]
        },
        {"tag": "goodbye",
         "patterns": ["Bye", "See you later", "Goodbye", "Nice chatting to you, bye", "Till next time"],
         "responses": ["See you!", "Have a nice day", "Bye! Come back again soon."],
         "context": [""]
        },
        {"tag": "thanks",
         "patterns": ["Thanks", "Thank you", "That's helpful", "Awesome, thanks", "Thanks for helping me"],
         "responses": ["Happy to help!", "Any time!", "My pleasure"],
         "context": [""]
        },
        {"tag": "noanswer",
         "patterns": [],
         "responses": ["Sorry, can't understand you", "Please give me more info", "Not sure I understand"],
         "context": [""]
        },
        {"tag": "options",
         "patterns": ["How you could help me?", "What you can do?", "What help you provide?", "How you can be helpful?", "What support is offered"],
         "responses": ["I can guide you through Adverse drug reaction list, Blood pressure tracking, Hospitals and Pharmacies", "Offering support for Adverse drug reaction, Blood pressure, Hospitals and Pharmacies"],
         "context": [""]
        },
        {"tag": "adverse_drug",
         "patterns": ["How to check Adverse drug reaction?", "Open adverse drugs module", "Give me a list of drugs causing adverse behavior", "List all drugs suitable for patient with adverse reaction", "Which drugs dont have adverse reaction?" ],
         "responses": ["Navigating to Adverse drug reaction module"],
         "context": [""]
        },
        {"tag": "blood_pressure",
         "patterns": ["Open blood pressure module", "Task related to blood pressure", "Blood pressure data entry", "I want to log blood pressure results", "Blood pressure data management" ],
         "responses": ["Navigating to Blood Pressure module"],
         "context": [""]
        },
        {"tag": "blood_pressure_search",
         "patterns": ["I want to search for blood pressure result history", "Blood pressure for patient", "Load patient blood pressure result", "Show blood pressure results for patient", "Find blood pressure results by ID" ],
         "responses": ["Please provide Patient ID", "Patient ID?"],
         "context": ["search_blood_pressure_by_patient_id"]
        },
        {"tag": "search_blood_pressure_by_patient_id",
         "patterns": [],
         "responses": ["Loading Blood pressure result for Patient"],
         "context": [""]
        },
        {"tag": "pharmacy_search",
         "patterns": ["Find me a pharmacy", "Find pharmacy", "List of pharmacies nearby", "Locate pharmacy", "Search pharmacy" ],
         "responses": ["Please provide pharmacy name"],
         "context": ["search_pharmacy_by_name"]
        },
        {"tag": "search_pharmacy_by_name",
         "patterns": [],
         "responses": ["Loading pharmacy details"],
         "context": [""]
        },
        {"tag": "hospital_search",
         "patterns": ["Lookup for hospital", "Searching for hospital to transfer patient", "I want to search hospital data", "Hospital lookup for patient", "Looking up hospital details" ],
         "responses": ["Please provide hospital name or location"],
         "context": ["search_hospital_by_params"]
        },
        {"tag": "search_hospital_by_params",
         "patterns": [],
         "responses": ["Please provide hospital type"],
         "context": ["search_hospital_by_type"]
        },
        {"tag": "search_hospital_by_type",
         "patterns": [],
         "responses": ["Loading hospital details"],
         "context": [""]
        }
   ]
}


================================================
FILE: train_chatbot.py
================================================
# Training script: builds a bag-of-words intent classifier from intents.json
# and saves the model plus its vocabulary/label artifacts.
import nltk
# Download tokenizer and lemmatizer data on every run (no-op if cached).
nltk.download('punkt')
nltk.download('wordnet')
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
import json
import pickle

import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.optimizers import SGD
import random

# vocabulary, intent tags, and (token_list, tag) training pairs
words=[]
classes = []
documents = []
# punctuation tokens excluded from the vocabulary
ignore_words = ['?', '!']
data_file = open('intents.json').read()
intents = json.loads(data_file)


# Walk every pattern of every intent, collecting tokens and (tokens, tag) pairs.
for intent in intents['intents']:
    for pattern in intent['patterns']:

        # take each word and tokenize it
        w = nltk.word_tokenize(pattern)
        words.extend(w)
        # adding documents: one training example per pattern
        documents.append((w, intent['tag']))

        # adding classes to our class list (first occurrence only)
        if intent['tag'] not in classes:
            classes.append(intent['tag'])

# Lemmatize + lower-case the vocabulary and drop punctuation tokens.
words = [lemmatizer.lemmatize(w.lower()) for w in words if w not in ignore_words]
# sorted(set(...)) gives a stable, duplicate-free vocabulary ordering
# that the inference side (chatgui.py) depends on via words.pkl.
words = sorted(list(set(words)))

classes = sorted(list(set(classes)))

print (len(documents), "documents")

print (len(classes), "classes", classes)

print (len(words), "unique lemmatized words", words)


# Persist vocabulary and labels for use at inference time.
pickle.dump(words,open('words.pkl','wb'))
pickle.dump(classes,open('classes.pkl','wb'))

# initializing training data: one (bag-of-words, one-hot label) pair per pattern
training = []
output_empty = [0] * len(classes)
for doc in documents:
    # lemmatize each word of the pattern so it matches the vocabulary,
    # which was lemmatized the same way above
    pattern_words = [lemmatizer.lemmatize(word.lower()) for word in doc[0]]
    # bag of words: 1 where the vocabulary word appears in this pattern
    bag = [1 if w in pattern_words else 0 for w in words]

    # one-hot label: '1' only at this pattern's tag position
    output_row = list(output_empty)
    output_row[classes.index(doc[1])] = 1

    training.append([bag, output_row])

# shuffle our features
random.shuffle(training)
# BUG FIX: the original did np.array(training) on rows whose two elements
# have different lengths (len(words) vs len(classes)); NumPy >= 1.24 raises
# ValueError for such ragged arrays.  Split into the two parallel lists
# directly instead — model.fit() below converts them to arrays itself.
# X - patterns, Y - intents
train_x = [entry[0] for entry in training]
train_y = [entry[1] for entry in training]
print("Training data created")


# Create model - 3 layers. First layer 128 neurons, second layer 64 neurons and 3rd output layer contains number of neurons
# equal to number of intents to predict output intent with softmax
model = Sequential()
model.add(Dense(128, input_shape=(len(train_x[0]),), activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(len(train_y[0]), activation='softmax'))

# Compile model. Stochastic gradient descent with Nesterov accelerated gradient gives good results for this model
# NOTE(review): 'lr' and 'decay' were renamed/removed in TF2's keras.optimizers.SGD
# (use 'learning_rate'); this line only works on older Keras — confirm the pinned version.
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

#fitting and saving the model
hist = model.fit(np.array(train_x), np.array(train_y), epochs=200, batch_size=5, verbose=1)
# NOTE(review): the second positional argument of Model.save is 'overwrite',
# so passing 'hist' here merely acts as a truthy overwrite flag — the training
# history is NOT saved with the model.
model.save('chatbot_model.h5', hist)

print("model created")


================================================
FILE: words.pkl
================================================
(lp0
V's
p1
aV,
p2
aVa
p3
aVadverse
p4
aVall
p5
aVanyone
p6
aVare
p7
aVawesome
p8
aVbe
p9
aVbehavior
p10
aVblood
p11
aVby
p12
aVbye
p13
aVcan
p14
aVcausing
p15
aVchatting
p16
aVcheck
p17
aVcould
p18
aVdata
p19
aVday
p20
aVdetail
p21
aVdo
p22
aVdont
p23
aVdrug
p24
aVentry
p25
aVfind
p26
aVfor
p27
aVgive
p28
aVgood
p29
aVgoodbye
p30
aVhave
p31
aVhello
p32
aVhelp
p33
aVhelpful
p34
aVhelping
p35
aVhey
p36
aVhi
p37
aVhistory
p38
aVhola
p39
aVhospital
p40
aVhow
p41
aVi
p42
aVid
p43
aVis
p44
aVlater
p45
aVlist
p46
aVload
p47
aVlocate
p48
aVlog
p49
aVlooking
p50
aVlookup
p51
aVmanagement
p52
aVme
p53
aVmodule
p54
aVnearby
p55
aVnext
p56
aVnice
p57
aVof
p58
aVoffered
p59
aVopen
p60
aVpatient
p61
aVpharmacy
p62
aVpressure
p63
aVprovide
p64
aVreaction
p65
aVrelated
p66
aVresult
p67
aVsearch
p68
aVsearching
p69
aVsee
p70
aVshow
p71
aVsuitable
p72
aVsupport
p73
aVtask
p74
aVthank
p75
aVthanks
p76
aVthat
p77
aVthere
p78
aVtill
p79
aVtime
p80
aVto
p81
aVtransfer
p82
aVup
p83
aVwant
p84
aVwhat
p85
aVwhich
p86
aVwith
p87
aVyou
p88
a.
Download .txt
gitextract_jv4jn7af/

├── README.md
├── chatbot_model.h5
├── chatgui.py
├── classes.pkl
├── intents.json
├── train_chatbot.py
└── words.pkl
Download .txt
SYMBOL INDEX (6 symbols across 1 files)

FILE: chatgui.py
  function clean_up_sentence (line 17) | def clean_up_sentence(sentence):
  function bow (line 24) | def bow(sentence, words, show_details=True):
  function predict_class (line 38) | def predict_class(sentence, model):
  function getResponse (line 51) | def getResponse(ints, intents_json):
  function chatbot_response (line 60) | def chatbot_response(msg):
  function send (line 71) | def send():
Condensed preview — 7 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (13K chars).
[
  {
    "path": "README.md",
    "chars": 112,
    "preview": "# Simple-Python-Chatbot\n\nCreating a simple Python chatbot using natural language processing and deep learning.\n\n"
  },
  {
    "path": "chatgui.py",
    "chars": 3625,
    "preview": "\r\nimport nltk\r\nfrom nltk.stem import WordNetLemmatizer\r\nlemmatizer = WordNetLemmatizer()\r\nimport pickle\r\nimport numpy as"
  },
  {
    "path": "classes.pkl",
    "chars": 165,
    "preview": "(lp0\nVadverse_drug\np1\naVblood_pressure\np2\naVblood_pressure_search\np3\naVgoodbye\np4\naVgreeting\np5\naVhospital_search\np6\naVo"
  },
  {
    "path": "intents.json",
    "chars": 3844,
    "preview": "{\"intents\": [\n        {\"tag\": \"greeting\",\n         \"patterns\": [\"Hi there\", \"How are you\", \"Is anyone there?\",\"Hey\",\"Hol"
  },
  {
    "path": "train_chatbot.py",
    "chars": 3174,
    "preview": "import nltk\r\nnltk.download('punkt')\r\nnltk.download('wordnet')\r\nfrom nltk.stem import WordNetLemmatizer\r\nlemmatizer = Wor"
  },
  {
    "path": "words.pkl",
    "chars": 1032,
    "preview": "(lp0\nV's\np1\naV,\np2\naVa\np3\naVadverse\np4\naVall\np5\naVanyone\np6\naVare\np7\naVawesome\np8\naVbe\np9\naVbehavior\np10\naVblood\np11\naVb"
  }
]

// ... and 1 more file (download for full content)

About this extraction

This page contains the full source code of the jerrytigerxu/Simple-Python-Chatbot GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 7 files (11.7 KB), approximately 3.4k tokens, and a symbol index with 6 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.

Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.

Copied to clipboard!