AD3311 Lab Program

AD3311 Artificial Intelligence

1. To implement basic search strategies – 8-Puzzle Problem, 8-Queens Problem, Cryptarithmetic.

Program:

# 8-Puzzle Problem Solver
class PuzzleSolution:
    def solve(self, board):
        board_dict = {}
        flatten = []

        for i in range(len(board)):
            flatten += board[i]

        flatten = tuple(flatten)
        board_dict[flatten] = 0

        if flatten == (0, 1, 2, 3, 4, 5, 6, 7, 8):
            return 0

        return self.get_paths(board_dict)

    def get_paths(self, board_dict):
        # Breadth-first search: expand every board that is cnt moves away,
        # then every board cnt + 1 moves away, and so on.
        cnt = 0
        while True:
            current_nodes = [x for x in board_dict if board_dict[x] == cnt]

            if len(current_nodes) == 0:
                return -1

            for node in current_nodes:
                next_moves = self.find_next(node)
                for move in next_moves:
                    if move not in board_dict:
                        board_dict[move] = cnt + 1
                        if move == (0, 1, 2, 3, 4, 5, 6, 7, 8):
                            return cnt + 1
            cnt += 1

    def find_next(self, node):
        # Indices the blank (0) can move to from each position of the
        # flattened 3 x 3 board.
        moves = {
            0: [1, 3],
            1: [0, 2, 4],
            2: [1, 5],
            3: [0, 4, 6],
            4: [1, 3, 5, 7],
            5: [2, 4, 8],
            6: [3, 7],
            7: [4, 6, 8],
            8: [5, 7],
        }

        results = []
        pos_0 = node.index(0)

        for move in moves[pos_0]:
            new_node = list(node)
            new_node[move], new_node[pos_0] = new_node[pos_0], new_node[move]
            results.append(tuple(new_node))

        return results

# 8-Queens Solver
def attack(i, j, board):
    # Check the row and column of square (i, j).
    for k in range(0, 8):
        if board[i][k] == 1 or board[k][j] == 1:
            return True

    # Check both diagonals of square (i, j).
    for k in range(0, 8):
        for l in range(0, 8):
            if (k + l == i + j) or (k - l == i - j):
                if board[k][l] == 1:
                    return True
    return False

def eight_queens(n, board):
    if n == 0:
        return True
    for i in range(0, 8):
        for j in range(0, 8):
            if (not attack(i, j, board)) and (board[i][j] != 1):
                board[i][j] = 1
                if eight_queens(n - 1, board):
                    return True
                board[i][j] = 0
    return False

# Cryptarithmetic Solver
def find_value(word, assigned):
    num = 0
    for char in word:
        num = num * 10
        num += assigned[char]
    return num

def is_valid_assignment(word1, word2, result, assigned):
    # Leading letters cannot map to zero.
    if assigned[word1[0]] == 0 or assigned[word2[0]] == 0 or assigned[result[0]] == 0:
        return False
    return True

def _solve(word1, word2, result, letters, assigned, solutions):
    if not letters:
        if is_valid_assignment(word1, word2, result, assigned):
            num1 = find_value(word1, assigned)
            num2 = find_value(word2, assigned)
            num_result = find_value(result, assigned)
            if num1 + num2 == num_result:
                solutions.append((f'{num1} + {num2} = {num_result}', assigned.copy()))
        return

    cur_letter = letters.pop()
    for num in range(10):
        if num not in assigned.values():
            assigned[cur_letter] = num
            _solve(word1, word2, result, letters, assigned, solutions)
            assigned.pop(cur_letter)
    letters.append(cur_letter)

def solve_cryptarithmetic(word1, word2, result):
    letters = sorted(set(word1) | set(word2) | set(result))

    if len(result) > max(len(word1), len(word2)) + 1 or len(letters) > 10:
        print('0 Solutions!')
        return

    solutions = []
    _solve(word1, word2, result, letters, {}, solutions)

    if solutions:
        print('\nSolutions:')
        for soln in solutions:
            print(f'{soln[0]}\t{soln[1]}')
    else:
        print('0 Solutions!')

# Main Program
if __name__ == '__main__':
    print("Select a problem to solve:")
    print("1. 8-Puzzle Problem")
    print("2. 8-Queens Problem")
    print("3. Cryptarithmetic Puzzle")

    choice = int(input("Enter your choice: "))

    if choice == 1:
        # 8-Puzzle Problem
        matrix = [
            [3, 1, 2],
            [4, 7, 5],
            [6, 8, 0]
        ]
        ob = PuzzleSolution()
        print("NO OF MOVES==", ob.solve(matrix))

    elif choice == 2:
        # 8-Queens Problem
        N = 8  # Fixed to 8-Queens
        board = [[0] * N for _ in range(N)]
        eight_queens(N, board)
        print("Solution for 8-Queens Problem:")
        for row in board:
            print(row)

    elif choice == 3:
        # Cryptarithmetic Puzzle
        word1 = input("Enter WORD1: ").upper()
        word2 = input("Enter WORD2: ").upper()
        result = input("Enter RESULT: ").upper()
        if not word1.isalpha() or not word2.isalpha() or not result.isalpha():
            raise TypeError("Inputs should only consist of alphabets.")

        solve_cryptarithmetic(word1, word2, result)

    else:
        print("Invalid choice!")

OUTPUT:
8-Puzzle Problem:
NO OF MOVES== 4

8-Queens Problem:
Solution for 8-Queens Problem:
[1, 0, 0, 0, 0, 0, 0, 0]
[0, 0, 0, 0, 1, 0, 0, 0]
[0, 0, 0, 0, 0, 0, 0, 1]
[0, 0, 0, 0, 0, 1, 0, 0]
[0, 0, 1, 0, 0, 0, 0, 0]
[0, 0, 0, 0, 0, 0, 1, 0]
[0, 1, 0, 0, 0, 0, 0, 0]
[0, 0, 0, 1, 0, 0, 0, 0]

Cryptarithmetic Puzzle:
Solutions:
9567 + 1085 = 10652 {'S': 9, 'E': 5, 'N': 6, 'D': 7, 'M': 1, 'O': 0, 'R': 8, 'Y': 2}
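Note: not every 3 x 3 board can reach the goal state, and the blind BFS above simply returns -1 in that case. A quicker pre-check is the standard inversion-parity test; the sketch below is our own addition (the helper name is_solvable is not part of the program above) and assumes the goal (0, 1, ..., 8) used here, whose inversion count is even.

def is_solvable(board):
    # Flatten the 3 x 3 board and ignore the blank (0).
    tiles = [t for row in board for t in row if t != 0]
    # Count inversions: pairs of tiles that appear in descending order.
    inversions = sum(1 for i in range(len(tiles))
                       for j in range(i + 1, len(tiles))
                       if tiles[i] > tiles[j])
    # For a 3 x 3 puzzle, a board can reach a goal of even inversion
    # parity exactly when its own inversion count is even.
    return inversions % 2 == 0

print(is_solvable([[3, 1, 2], [4, 7, 5], [6, 8, 0]]))   # True, so solve() will not return -1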
2. Implement A* and memory bounded A* algorithms.

Program:

import heapq

# Heuristic function: Manhattan Distance
def manhattan_distance(state, goal):
    distance = 0
    for i in range(1, 9):
        x1, y1 = divmod(state.index(i), 3)
        x2, y2 = divmod(goal.index(i), 3)
        distance += abs(x1 - x2) + abs(y1 - y2)
    return distance

# A* Algorithm for 8-Puzzle Problem
def a_star(initial_state, goal_state):
    open_list = []
    heapq.heappush(open_list, (0, initial_state, []))

    visited = {initial_state: 0}

    while open_list:
        cost, current_state, path = heapq.heappop(open_list)

        if current_state == goal_state:
            return path

        zero_pos = current_state.index(0)
        x, y = divmod(zero_pos, 3)

        moves = [(-1, 0), (1, 0), (0, -1), (0, 1)]

        for dx, dy in moves:
            nx, ny = x + dx, y + dy
            if 0 <= nx < 3 and 0 <= ny < 3:
                new_pos = 3 * nx + ny
                new_state = list(current_state)
                new_state[zero_pos], new_state[new_pos] = new_state[new_pos], new_state[zero_pos]
                new_state = tuple(new_state)

                g = len(path) + 1
                h = manhattan_distance(new_state, goal_state)
                new_cost = g + h

                if new_state not in visited or g < visited[new_state]:
                    visited[new_state] = g
                    heapq.heappush(open_list, (new_cost, new_state, path + [new_state]))

    return None

# Recursive function for depth-limited search in IDA*
def ida_star_recursive(state, g, bound, goal_state, path):
    f = g + manhattan_distance(state, goal_state)
    if f > bound:
        return f, None

    if state == goal_state:
        return f, path

    min_bound = float('inf')
    zero_pos = state.index(0)
    x, y = divmod(zero_pos, 3)

    moves = [(-1, 0), (1, 0), (0, -1), (0, 1)]

    for dx, dy in moves:
        nx, ny = x + dx, y + dy
        if 0 <= nx < 3 and 0 <= ny < 3:
            new_pos = 3 * nx + ny
            new_state = list(state)
            new_state[zero_pos], new_state[new_pos] = new_state[new_pos], new_state[zero_pos]
            new_state = tuple(new_state)

            new_g = g + 1
            new_bound, new_path = ida_star_recursive(new_state, new_g, bound, goal_state, path + [new_state])

            if new_path is not None:
                return new_bound, new_path

            min_bound = min(min_bound, new_bound)

    return min_bound, None

def ida_star(initial_state, goal_state):
    bound = manhattan_distance(initial_state, goal_state)
    path = []

    while True:
        bound, path = ida_star_recursive(initial_state, 0, bound, goal_state, [initial_state])
        if path is not None:
            return path
        if bound == float('inf'):
            return None

# Main Program
if __name__ == '__main__':
    print("Select an algorithm to solve the 8-puzzle problem:")
    print("1. A* Algorithm")
    print("2. IDA* Algorithm")

    choice = int(input("Enter your choice: "))

    initial_state = (3, 1, 2, 4, 7, 5, 6, 8, 0)
    goal_state = (0, 1, 2, 3, 4, 5, 6, 7, 8)

    if choice == 1:
        print("Solving using A* Algorithm...")
        path = a_star(initial_state, goal_state)
        if path:
            print("Solution found!")
            for step in path:
                print(step)
        else:
            print("No solution found.")

    elif choice == 2:
        print("Solving using IDA* Algorithm...")
        path = ida_star(initial_state, goal_state)
        if path:
            print("Solution found!")
            for step in path:
                print(step)
        else:
            print("No solution found.")

    else:
        print("Invalid choice!")

OUTPUT:
A* Algorithm:
Select an algorithm to solve the 8-puzzle problem:
1. A* Algorithm
2. IDA* Algorithm
Enter your choice: 1
Solving using A* Algorithm...
Solution found!
(3, 1, 2, 4, 7, 5, 6, 0, 8)
(3, 1, 2, 4, 0, 5, 6, 7, 8)
(3, 1, 2, 0, 4, 5, 6, 7, 8)
(0, 1, 2, 3, 4, 5, 6, 7, 8)
IDA* Algorithm:
Select an algorithm to solve the 8-puzzle problem:
1. A* Algorithm
2. IDA* Algorithm
Enter your choice: 2
Solving using IDA* Algorithm...
Solution found!
(3, 1, 2, 4, 7, 5, 6, 8, 0)
(3, 1, 2, 4, 7, 5, 6, 0, 8)
(3, 1, 2, 4, 0, 5, 6, 7, 8)
(3, 1, 2, 0, 4, 5, 6, 7, 8)
(0, 1, 2, 3, 4, 5, 6, 7, 8)
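Both solvers also work for other solvable start states. A small usage sketch of our own (the start state below is illustrative and not part of the recorded run): a_star() returns only the successor states along the path, while ida_star() includes the initial state as well.

start = (1, 0, 2, 3, 4, 5, 6, 7, 8)    # one move away from the goal
goal = (0, 1, 2, 3, 4, 5, 6, 7, 8)
print(a_star(start, goal))             # [(0, 1, 2, 3, 4, 5, 6, 7, 8)]
print(ida_star(start, goal))           # same path, with the start state listed first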

3. Implement the Minimax algorithm for game playing (Alpha-Beta pruning)

Program:
MAX, MIN = 1000, -1000

def minimax(depth, nodeIndex, maximizingPlayer, values, alpha, beta):
    # Terminating condition, i.e. a leaf node is reached
    if depth == 3:
        return values[nodeIndex]

    if maximizingPlayer:
        best = MIN
        # Recur for left and right children
        for i in range(0, 2):
            val = minimax(depth + 1, nodeIndex * 2 + i, False, values, alpha, beta)
            best = max(best, val)
            alpha = max(alpha, best)
            # Alpha-Beta Pruning
            if beta <= alpha:
                break
        return best
    else:
        best = MAX
        # Recur for left and right children
        for i in range(0, 2):
            val = minimax(depth + 1, nodeIndex * 2 + i, True, values, alpha, beta)
            best = min(best, val)
            beta = min(beta, best)
            # Alpha-Beta Pruning
            if beta <= alpha:
                break
        return best

# Driver Code
if __name__ == "__main__":
    values = [3, 5, 6, 9, 1, 2, 0, -1]
    print("The optimal value is:", minimax(0, 0, True, values, MIN, MAX))

OUTPUT:
The optimal value is: 5
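For comparison, a plain minimax without pruning explores every leaf but returns the same value; the function below is our own sketch and is not part of the recorded program above.

def plain_minimax(depth, nodeIndex, maximizingPlayer, values):
    # Same 3-level game tree, but with no alpha-beta cut-offs.
    if depth == 3:
        return values[nodeIndex]
    children = [plain_minimax(depth + 1, nodeIndex * 2 + i, not maximizingPlayer, values)
                for i in range(2)]
    return max(children) if maximizingPlayer else min(children)

print(plain_minimax(0, 0, True, [3, 5, 6, 9, 1, 2, 0, -1]))   # 5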
4. Solve constraint satisfaction problems
Program:

VARIABLES = ["csc", "maths", "phy", "che", "tam", "eng", "bio"]
DOMAIN = ["Monday", "Tuesday", "Wednesday"]
CONSTRAINTS = [
    ("csc", "maths"),
    ("csc", "phy"),
    ("maths", "phy"),
    ("maths", "che"),
    ("maths", "tam"),
    ("phy", "tam"),
    ("phy", "eng"),
    ("che", "eng"),
    ("tam", "eng"),
    ("tam", "bio"),
    ("eng", "bio")
]

def backtrack(assignment):
    # Runs backtracking search to find an assignment
    if len(assignment) == len(VARIABLES):
        return assignment
    var = select_unassigned_variable(assignment)
    for value in DOMAIN:
        if consistent(var, value, assignment):
            assignment[var] = value
            result = backtrack(assignment)
            if result is not None:
                return result
            assignment.pop(var)
    return None

def select_unassigned_variable(assignment):
    # Chooses a variable not yet assigned, in order
    for var in VARIABLES:
        if var not in assignment.keys():
            return var

def consistent(var, value, assignment):
    # Checks whether assigning `value` to `var` is consistent:
    # no two courses linked by a constraint may share a day.
    for var1, var2 in CONSTRAINTS:
        if var1 == var or var2 == var:
            for var3, day in assignment.items():
                if (var3 == var2 or var3 == var1) and day == value:
                    return False
    return True

solution = backtrack(dict())
print(solution)

OUTPUT:
{'csc': 'Monday', 'maths': 'Tuesday', 'phy': 'Wednesday', 'che': 'Monday', 'tam': 'Monday', 'eng': 'Tuesday', 'bio': 'Wednesday'}
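The returned schedule can be checked against the constraint list directly; the verify() helper below is our own addition and is not part of the program above.

def verify(assignment):
    # Every pair of courses that shares a constraint must fall on different days.
    return all(assignment[a] != assignment[b] for a, b in CONSTRAINTS)

print(verify(solution))   # True for the schedule printed above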
5. Implement propositional model checking algorithms
Program:
import re

class Literal:
    # A propositional literal: a variable name plus a sign.
    def __init__(self, name, sign=True):
        self.name = str(name)
        self.sign = sign

    def __neg__(self):
        return Literal(self.name, False)

    def __str__(self):
        return str(self.name)

    def __repr__(self):
        if self.sign:
            return '%r' % str(self.__str__())
        else:
            return '%r' % str("-" + self.__str__())

def CNFconvert(KB):
    # Convert the knowledge base (a list of clause sets) into a list of lists.
    storage = []
    for i in KB:
        i = list(i)
        for j in i:
            j = str(j)
        storage.append(i)
    return storage

def VariableSet(KB):
    # Collect every variable name appearing in the KB, without signs.
    KB = eval((CNFconvert(KB).__str__()))
    storage = []
    for obj in KB:
        for item in obj:
            if item[0] == '-' and item[1:] not in storage:
                storage.append(str(item[1:]))
            elif item not in storage and item[0] != '-':
                storage.append(str(item))
    return storage

def Negativeofx(x):
    check = re.match("-", str(x))
    if check:
        return str(x[1:])
    else:
        return "-" + str(x)

def pickX(literals, varList):
    # Pick the next unassigned variable to branch on.
    for x in varList:
        if x not in literals:
            break
    return x

def splitFalseLiterals(cnf, x):
    # Remove the (now false) literal x from every clause containing it.
    holder = []
    for item in cnf:
        if x in item:
            item.remove(x)
        holder.append(item)
    return holder

def splitTrueLiteral(cnf, x):
    # Drop every clause satisfied by the (now true) literal x.
    holder = []
    for item in cnf:
        if x in item:
            continue
        else:
            holder.append(item)
    return holder

def unitResolution(clauses):
    literalholder = {}
    i = 0
    while i < len(clauses):
        newClauses = []
        clause = clauses[i]
        if len(clause) == 1:
            literal = str(clause[0])
            pattern = re.match("-", literal)
            if pattern:
                nx = literal[1:]
                literalholder[nx] = False
            else:
                nx = "-" + literal
                literalholder[literal] = True
            for item in clauses:
                if item != clauses[i]:
                    if nx in item:
                        item.remove(nx)
                    newClauses.append(item)
            i = 0
            clauses = newClauses
        else:
            i += 1
    return literalholder, clauses

def dpll(clauses, varList):
    literals, cnf = unitResolution(clauses)
    if cnf == []:
        return literals
    elif [] in cnf:
        return "notsatisfiable"
    else:
        while True:
            x = pickX(literals, varList)
            x = str(x)
            nx = Negativeofx(x)
            ncnf = splitTrueLiteral(cnf, x)
            ncnf = splitFalseLiterals(ncnf, nx)
            if ncnf == cnf:
                varList.remove(x)
            else:
                break
        # Branch 1: assume x is true
        case1 = dpll(ncnf, varList)
        if case1 != "notsatisfiable":
            copy = case1.copy()
            copy.update(literals)
            copy.update({x: True})
            return copy
        # Branch 2: assume x is false
        ncnf = splitTrueLiteral(cnf, nx)
        ncnf = splitFalseLiterals(ncnf, x)
        case2 = dpll(ncnf, varList)
        if case2 != "notsatisfiable":
            copy = case2.copy()
            copy.update(literals)
            copy.update({x: False})
            return copy
        else:
            return "notsatisfiable"

def DPLL(KB):
    KB = eval((CNFconvert(KB).__str__()))
    varList = VariableSet(KB)
    result = dpll(KB, varList)
    if result == 'notsatisfiable':
        return False
    else:
        for i in varList:
            if i in result and result[i] == True:
                result[i] = 'true'
            elif i in result and result[i] == False:
                result[i] = 'false'
            else:
                result[i] = 'free'
        return [True, result]

# Example Usage
A = Literal('A')
B = Literal('B')
C = Literal('C')
D = Literal('D')
KB = [{A, B}, {A, -C}, {-A, B, D}]
print(DPLL(KB))

OUTPUT:
[True, {'B': 'true', 'A': 'true', 'C': 'free', 'D': 'free'}]
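The same solver reports an unsatisfiable knowledge base by returning False; the clauses below are our own usage sketch and not part of the recorded run.

P = Literal('P')
print(DPLL([{P}, {-P}]))   # False: no assignment satisfies both P and -P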
6. Implement forward chaining, backward chaining, and resolution strategies
Program:

from sympy import symbols, And, Or, Not
from sympy.logic.inference import satisfiable

# Define symbols for logical propositions
rains, cloudy = symbols('rains cloudy')
facts = {'has_feathers': True}

# Knowledge base for forward chaining and backward chaining
rules = [
    {"if": ["lays_eggs"], "then": "bird"},
    {"if": ["has_feathers"], "then": "bird"}
]

# CNF knowledge base for the resolution strategy
cnf_rules = [Or(rains, cloudy), Not(rains)]

def forward_chaining(rules, facts):
    inferred = set()
    changes = True

    while changes:
        changes = False
        for rule in rules:
            if all(facts.get(cond, False) for cond in rule["if"]) and rule["then"] not in inferred:
                inferred.add(rule["then"])
                facts[rule["then"]] = True
                changes = True

    return inferred

def backward_chaining(goal, rules, facts):
    if goal in facts and facts[goal]:
        return True
    for rule in rules:
        if rule["then"] == goal:
            if all(backward_chaining(cond, rules, facts) for cond in rule["if"]):
                return True
    return False

def resolution_strategy(knowledge_base, goal):
    # Refutation: the goal follows if the KB together with the negated goal is unsatisfiable.
    neg_goal = Not(goal)
    extended_kb = knowledge_base + [neg_goal]
    return not satisfiable(And(*extended_kb))

def combined_inference(goal):
    # Forward chaining
    inferred_facts = forward_chaining(rules, facts)
    print("Inferred facts (Forward Chaining):", inferred_facts)

    # Backward chaining
    is_goal_achievable = backward_chaining(goal, rules, facts)
    print(f"Can we achieve goal '{goal}' (Backward Chaining)?", is_goal_achievable)

    # Resolution Strategy
    result = resolution_strategy(cnf_rules, goal)
    print(f"Is the goal '{goal}' achievable using Resolution Strategy?", result)

# Testing combined inference
goal = cloudy
combined_inference(goal)

OUTPUT:
Inferred facts (Forward Chaining): {'bird'}
Can we achieve goal 'cloudy' (Backward Chaining)? False
Is the goal 'cloudy' achievable using Resolution Strategy? True
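The string-based rules can also be queried with a goal they can actually derive; the call below is our own usage sketch, not part of the recorded run.

print(backward_chaining('bird', rules, facts))   # True: has_feathers is a fact and has_feathers -> bird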
7. Build naïve Bayes models
Program:
from sklearn import datasets
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score

# The dataset
iris = datasets.load_iris()
X = iris.data
Y = iris.target

# Splitting the dataset into training and testing sets
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=1/3)

# Training a Gaussian Naive Bayes classifier
model = GaussianNB()
model.fit(X_train, Y_train)

# Making predictions
model_predictions = model.predict(X_test)

# Printing predictions and true labels
print("\nPredictions: ", model_predictions)
print("\nTrue Labels: ", Y_test)

# Calculating and printing accuracy
accuracyScore = accuracy_score(Y_test, model_predictions)
print("\nAccuracy Score: ", accuracyScore)

# Creating and printing a confusion matrix
cm = confusion_matrix(Y_test, model_predictions)
print("\nConfusion Matrix:\n", cm)

OUTPUT:
Predictions: [0 2 1 1 0 0 2 2 1 2 1 0 1 2 0 2 2 0 2 1 1 2 2 0 1 0 0 2 2 0 0 2 0 1 1 1 1 1 2 1 1 0 0 2 1 0 1 0 2
2]
True Labels: [0 2 1 1 0 0 2 2 1 2 1 0 1 2 0 2 2 0 2 1 1 2 2 0 1 0 0 2 2 0 0 2 0 1 1 1 1 1 2 1 1 0 0 2 1 0 1 0 2
2]

Accuracy Score: 1.0

Confusion Matrix:
[[16 0 0]
[ 0 17 0]
[ 0 0 17]]
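Because train_test_split shuffles the data randomly, the accuracy can differ between runs. If a reproducible record is wanted, the split can be pinned with the random_state parameter; the value below is an arbitrary choice of ours.

X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=1/3, random_state=0)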

8. Implement Bayesian networks and perform inferences.


Program:

import numpy as np
from sklearn import datasets
import torch
import torch.nn as nn
import torch.optim as optim
import torchbnn as bnn
import matplotlib.pyplot as plt

# the dataset
dataset = datasets.load_iris()
data = dataset.data
target = dataset.target

# Convert dataset to PyTorch tensors
data_tensor = torch.from_numpy(data).float()
target_tensor = torch.from_numpy(target).long()

# Define the BNN model
model = nn.Sequential(
    bnn.BayesLinear(prior_mu=0, prior_sigma=0.1, in_features=4, out_features=100),
    nn.ReLU(),
    bnn.BayesLinear(prior_mu=0, prior_sigma=0.1, in_features=100, out_features=3)
)

# Define loss functions and optimizer
cross_entropy_loss = nn.CrossEntropyLoss()
klloss = bnn.BKLLoss(reduction='mean', last_layer_only=False)
klweight = 0.01
optimizer = optim.Adam(model.parameters(), lr=0.01)

# Train the model
for step in range(3000):
    models = model(data_tensor)
    cross_entropy = cross_entropy_loss(models, target_tensor)
    kl = klloss(model)
    total_cost = cross_entropy + klweight * kl
    optimizer.zero_grad()
    total_cost.backward()
    optimizer.step()

# Print accuracy and loss
_, predicted = torch.max(models.data, 1)
final = target_tensor.size(0)
correct = (predicted == target_tensor).sum()
print('- Accuracy: %f %%' % (100 * float(correct) / final))
print('- CE : %2.2f, KL : %2.2f' % (cross_entropy.item(), kl.item()))

# Function to draw the graph
def draw_graph(predicted):
    fig = plt.figure(figsize=(16, 8))
    fig_1 = fig.add_subplot(1, 2, 1)
    fig_2 = fig.add_subplot(1, 2, 2)
    z1_plot = fig_1.scatter(data[:, 0], data[:, 1], c=target, marker='v')
    z2_plot = fig_2.scatter(data[:, 0], data[:, 1], c=predicted)
    plt.colorbar(z1_plot, ax=fig_1)
    plt.colorbar(z2_plot, ax=fig_2)
    fig_1.set_title("REAL")
    fig_2.set_title("PREDICT")
    plt.show()

# Evaluate and plot results
models = model(data_tensor)
_, predicted = torch.max(models.data, 1)
draw_graph(predicted)
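The Bayesian layers (bnn.BayesLinear) and the KL loss (bnn.BKLLoss) come from the third-party torchbnn package, which is not bundled with PyTorch. Assuming a standard pip environment, it can be installed with:

pip install torchbnn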

OUTPUT:
(The program prints the training accuracy together with the CE and KL loss values, then displays two scatter plots titled REAL and PREDICT comparing the true and predicted classes.)