DL Lab(6-10) With Output

The document outlines implementations of various neural network architectures including Feedforward Neural Networks, Recurrent Neural Networks, Gated Recurrent Units, Transformers, and Attention Mechanisms using TensorFlow and PyTorch. Each section provides code snippets for training models on specific datasets or tasks, such as classifying the Iris dataset or generating sequences based on sine wave data. Additionally, it demonstrates the use of attention mechanisms in neural networks with practical examples.

6. Implementation of Feedforward Neural Network

import tensorflow as tf
from tensorflow import keras
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris

# Load the Iris dataset (4 features, 3 classes)
iris = load_iris()
X = iris.data
y = iris.target

# Hold out 20% of the samples for testing
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Standardize features using statistics from the training set only
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

# Feedforward network: one hidden layer, softmax output over the 3 classes
model = keras.Sequential([
    keras.layers.Input(shape=(4,)),
    keras.layers.Dense(64, activation='relu'),
    keras.layers.Dense(3, activation='softmax')])

model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(X_train, y_train, epochs=100, batch_size=32, validation_data=(X_test, y_test))

test_loss, test_accuracy = model.evaluate(X_test, y_test)
print(f"Test accuracy: {test_accuracy}")

7. Implementation of Recurrent Neural Network

import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import SimpleRNN, Dense

# Generate a sine wave of 100 points as the training signal
num_points = 100
t = np.linspace(0, 10, num_points)
data = np.sin(t)

# Build (input window, next value) training pairs with a window of 10 steps
sequence_length = 10
X, y = [], []
for i in range(len(data) - sequence_length):
    X.append(data[i:i + sequence_length])
    y.append(data[i + sequence_length])
X = np.array(X).reshape(-1, sequence_length, 1)  # (samples, timesteps, features)
y = np.array(y)

# Simple RNN followed by a single regression output
model = keras.Sequential([
    SimpleRNN(32, activation='relu', input_shape=(sequence_length, 1)),
    Dense(1)])
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(X, y, epochs=100, batch_size=16)

# Continue the sequence autoregressively, starting from the first window
initial_sequence = data[:sequence_length]
generated_sequence = np.copy(initial_sequence)
for _ in range(len(data) - sequence_length):
    sequence_window = generated_sequence[-sequence_length:].reshape(1, sequence_length, 1)
    next_value = model.predict(sequence_window, verbose=0)[0][0]
    generated_sequence = np.append(generated_sequence, next_value)
print(generated_sequence)
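A short sketch for visually comparing the generated sequence with the original sine wave. It assumes matplotlib is installed and that data, generated_sequence, and sequence_length from the block above are in scope.

import matplotlib.pyplot as plt

plt.plot(data, label='original sine wave')
plt.plot(generated_sequence, linestyle='--', label='RNN-generated')
plt.axvline(sequence_length, color='gray', linestyle=':', label='end of seed window')
plt.xlabel('time step')
plt.ylabel('value')
plt.title('SimpleRNN sequence generation')
plt.legend()
plt.show()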

8. Implementation of Gated Recurrent Unit

import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import GRU, Dense

# Generate a sine wave of 100 points as the training signal
num_points = 100
t = np.linspace(0, 10, num_points)
data = np.sin(t)

# Build (input window, next value) training pairs with a window of 10 steps
sequence_length = 10
X, y = [], []
for i in range(len(data) - sequence_length):
    X.append(data[i:i + sequence_length])
    y.append(data[i + sequence_length])
X = np.array(X).reshape(-1, sequence_length, 1)  # (samples, timesteps, features)
y = np.array(y)

# GRU layer followed by a single regression output
model = keras.Sequential([
    GRU(32, activation='tanh', input_shape=(sequence_length, 1)),
    Dense(1)])
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(X, y, epochs=100, batch_size=16)

# Continue the sequence autoregressively, starting from the first window
initial_sequence = data[:sequence_length]
generated_sequence = np.copy(initial_sequence)
for _ in range(len(data) - sequence_length):
    sequence_window = generated_sequence[-sequence_length:].reshape(1, sequence_length, 1)
    next_value = model.predict(sequence_window, verbose=0)[0][0]
    generated_sequence = np.append(generated_sequence, next_value)
print(generated_sequence)
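For a deeper recurrent model, GRU layers can be stacked by returning the full output sequence from every layer except the last. A minimal sketch, assuming sequence_length from the block above is in scope; the layer sizes are illustrative.

from tensorflow import keras
from tensorflow.keras.layers import GRU, Dense

stacked_model = keras.Sequential([
    # return_sequences=True passes the output at every time step
    # to the next GRU instead of only the final hidden state
    GRU(32, activation='tanh', return_sequences=True, input_shape=(sequence_length, 1)),
    GRU(16, activation='tanh'),
    Dense(1)])
stacked_model.compile(optimizer='adam', loss='mean_squared_error')
stacked_model.summary()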

9. Implementation of Transformers

import torch
from transformers import DistilBertTokenizer, DistilBertForSequenceClassification
from torch.nn.functional import softmax

# Load the pretrained DistilBERT encoder with a 3-way classification head.
# Note: the head is randomly initialized, so the predicted class is not a
# meaningful sentiment judgment until the model is fine-tuned on labeled data.
model_name = 'distilbert-base-uncased'
tokenizer = DistilBertTokenizer.from_pretrained(model_name)
model = DistilBertForSequenceClassification.from_pretrained(model_name, num_labels=3)

text = "This is an example sentence for text classification."
inputs = tokenizer(text, return_tensors='pt', padding=True, truncation=True)

# Run the forward pass without tracking gradients
with torch.no_grad():
    outputs = model(**inputs)
    logits = outputs.logits

probabilities = softmax(logits, dim=1)
predicted_class = torch.argmax(probabilities, dim=1).item()

class_labels = ["Negative", "Neutral", "Positive"]
print("Text:", text)
print("Predicted Class:", class_labels[predicted_class])
print("Class Probabilities:", probabilities)

10. Implementation of Attention Mechanisms

import torch
import torch.nn as nn
import torch.nn.functional as F

class Attention(nn.Module):
    """Basic dot-product attention."""
    def __init__(self):
        super(Attention, self).__init__()

    def forward(self, query, key, value):
        # Similarity scores between each query and each key
        scores = torch.matmul(query, key.transpose(-2, -1))
        # Normalize the scores into attention weights
        attention_weights = F.softmax(scores, dim=-1)
        # Weighted combination of the values
        weighted_sum = torch.matmul(attention_weights, value)
        return weighted_sum, attention_weights

# Random query, key, and value tensors: batch of 1, 5 positions, dimension 10
query = torch.randn(1, 5, 10)
key = torch.randn(1, 5, 10)
value = torch.randn(1, 5, 10)

attention = Attention()
weighted_sum, attention_weights = attention(query, key, value)

print("Query:")
print(query)
print("Key:")
print(key)
print("Value:")
print(value)
print("Weighted Sum:")
print(weighted_sum)
print("Attention Weights:")
print(attention_weights)

Output:

Query:
tensor([[[ 0.3088, -0.8305, 1.1478, -0.5724, 0.6903, -0.1338, 0.4932,
0.3692, -0.0487, 0.3800],
[ 0.6160, -0.2674, 0.1060, 0.9276, -2.5696, 1.2531, 1.2445,
0.7406, -0.3382, 1.0559],
[ 1.1670, 0.6346, -1.0947, -0.0750, 0.5593, 0.6001, -2.6121,
-0.3675, -1.1025, 2.2920],
[ 1.0586, -0.7961, 1.3131, 0.8322, -0.4906, -0.9463, 0.5578,
0.2760, 0.1075, -0.6122],
[ 1.0031, 0.7011, 1.0347, -1.0988, -0.8351, -0.1433, 1.2998,
-0.4945, -0.5564, 1.3498]]])
Key:
tensor([[[ 1.1721, 1.4810, -1.6439, -0.4880, -0.0859, 1.1992, 0.6742,
0.9838, -0.7225, -1.0221],
[-0.3369, 1.9304, 1.2981, 0.2074, 1.5344, -0.4337, 0.1126,
1.6750, -0.8086, -0.2682],
[-2.3879, 0.6284, 0.4639, -1.2453, 0.2192, 0.2561, 0.0700,
-0.7883, 0.5103, 2.0492],
[ 1.0068, 0.1258, -0.2651, 1.4489, -0.2013, 1.6131, 0.3487,
0.6993, 0.3043, 2.3267],
[-1.6300, -0.3690, -0.1990, -1.4484, 0.8034, 0.8580, -0.9969,
-1.9386, 0.0605, 0.4753]]])
Value:
tensor([[[-0.3352, -0.4282, 0.1739, -0.4824, -1.7696, 1.9512, -1.6211,
1.1564, 0.6727, 0.0463],
[-0.5158, 1.0990, 1.4397, -0.1086, -0.4193, 0.8119, 1.5230,
-0.8639, 1.3441, -0.0614],
[ 0.9254, -0.3878, 2.6741, -0.8424, 0.6695, 0.4599, -0.0873,
1.0231, 0.0884, -0.8836],
[ 1.5770, -2.1937, -0.8749, 2.2441, -1.4209, 0.4604, -0.8374,
-0.3895, 0.2051, -0.0428],
[ 0.3838, 0.4586, -0.2848, 1.7067, -0.3848, -1.8305, -0.0618,
0.9862, -1.2581, 1.0811]]])
Weighted Sum:
tensor([[[ 0.1929, 0.2341, 1.2269, 0.2159, -0.3082, 0.4160, 0.6307,
-0.1386, 0.6160, -0.1283],
[ 1.5698, -2.1869, -0.8706, 2.2336, -1.4220, 0.4659, -0.8402,
-0.3836, 0.2068, -0.0426],
[ 1.4765, -1.9835, -0.7872, 2.1543, -1.3302, 0.3187, -0.7802,
-0.2735, 0.1112, 0.0226],
[ 0.0920, 0.0881, 0.7239, 0.5422, -0.7636, 0.7594, 0.6975,
-0.6245, 0.9803, -0.0509],
[ 1.0075, -0.9445, 1.1727, 0.3475, -0.2765, 0.5281, -0.3535,
0.4296, 0.2083, -0.4654]]])
Attention Weights:
tensor([[[1.2205e-02, 5.1663e-01, 2.3397e-01, 1.3063e-01, 1.0657e-01],
[3.7148e-03, 1.2883e-05, 9.6751e-05, 9.9617e-01, 1.8418e-06],
[6.3252e-03, 1.0601e-03, 1.1167e-02, 9.1533e-01, 6.6118e-02],
[4.7229e-02, 6.6431e-01, 2.9204e-03, 2.8337e-01, 2.1704e-03],
[5.3985e-02, 5.1660e-02, 5.2508e-01, 3.5587e-01, 1.3407e-02]]])
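The module above uses raw dot-product scores. Transformer-style scaled dot-product attention additionally divides the scores by the square root of the key dimension so the softmax does not saturate for large dimensions. A minimal sketch of that variant; the scaling is the only change from the class above.

import math
import torch
import torch.nn.functional as F

def scaled_dot_product_attention(query, key, value):
    d_k = query.size(-1)
    # Scale the scores by sqrt(d_k) to keep their variance roughly constant
    scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
    attention_weights = F.softmax(scores, dim=-1)
    return torch.matmul(attention_weights, value), attention_weights

q = torch.randn(1, 5, 10)
k = torch.randn(1, 5, 10)
v = torch.randn(1, 5, 10)
out, weights = scaled_dot_product_attention(q, k, v)
print(out.shape, weights.shape)  # torch.Size([1, 5, 10]) torch.Size([1, 5, 5])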
