Upload
others
View
4
Download
0
Embed Size (px)
Citation preview
Department of Information Technology Software Laboratory-V
--------------------------------------------------------------------------------------------------------------------- Assignment No: 1
---------------------------------------------------------------------------------------------------------------------
Title of the Assignment: Implementation of Client Server Application Using Thread. ---------------------------------------------------------------------------------------------------------------------
// MyClient.java -- console client for the menu-driven socket service on port 2001.
// Protocol: every request/response field travels as a UTF string via
// DataOutputStream.writeUTF / DataInputStream.readUTF.
import java.net.*;
import java.io.*;

public class MyClient {
    public static void main(String args[]) {
        int n, ch, ch1;
        String st, st1, st2, first, last;
        try {
            // args[0] is the server host name/IP; port is fixed at 2001.
            Socket s = new Socket(args[0], 2001);
            DataInputStream in = new DataInputStream(s.getInputStream());
            DataOutputStream out = new DataOutputStream(s.getOutputStream());
            BufferedReader object = new BufferedReader(new InputStreamReader(System.in));
            do {
                System.out.println("1.Factorial\n2.Adddition of digits\n3.String operations\n4.Exit\nEnter ur choice");
                ch = Integer.parseInt(object.readLine());
                out.writeUTF(Integer.toString(ch)); // tell the server which operation follows
                switch (ch) {
                    case 1: // factorial of n, computed server-side
                        System.out.println("Enter a number");
                        n = Integer.parseInt(object.readLine());
                        out.writeUTF(Integer.toString(n));
                        int fact = Integer.parseInt(in.readUTF());
                        System.out.println("Factorial of " + n + "is " + fact);
                        break;
                    case 2: // sum of decimal digits of n
                        System.out.println("Enter a number");
                        n = Integer.parseInt(object.readLine());
                        out.writeUTF(Integer.toString(n));
                        int sum = Integer.parseInt(in.readUTF());
                        System.out.println("Addition of digits of " + n + " is " + sum);
                        break;
                    case 3: // string-operation submenu
                        do {
                            System.out.println("1.concatenation\n2.substring\n3.palindrome\n4.Exit\nEnter ur choice");
                            ch1 = Integer.parseInt(object.readLine());
                            out.writeUTF(Integer.toString(ch1)); // submenu choice is forwarded too
                            switch (ch1) {
                                case 1: // concatenation of two strings
                                    System.out.println("Enter First string");
                                    st1 = object.readLine();
                                    out.writeUTF(st1);
                                    System.out.println("Enter second string");
                                    st2 = object.readLine();
                                    out.writeUTF(st2);
                                    st = in.readUTF();
                                    System.out.println("Concatenated String of " + st1 + " and " + st2 + " is :: " + st);
                                    break;
                                case 2: // substring [first, last) of st1
                                    System.out.println("Enter The string");
                                    st1 = object.readLine();
                                    out.writeUTF(st1);
                                    System.out.println("Enter The Start Position of the substring");
                                    first = object.readLine();
                                    out.writeUTF(first);
                                    System.out.println("Enter The end Position of the substring");
                                    last = object.readLine();
                                    out.writeUTF(last);
                                    st = in.readUTF();
                                    System.out.println("Substring of string " + st1 + " is :: " + st);
                                    break;
                                case 3: // palindrome check; server replies 0 iff palindrome
                                    System.out.println("Enter The string");
                                    st = object.readLine();
                                    out.writeUTF(st);
                                    n = Integer.parseInt(in.readUTF());
                                    if (n == 0)
                                        System.out.println("string" + st + " is Palindrome");
                                    else
                                        System.out.println("string" + st + " is not Palindrome");
                                    break;
                            }
                        } while (ch1 > 0 && ch1 != 4);
                        break;
                    case 4:
                        System.exit(0);
                        break;
                }
            } while (ch > 0);
        } catch (Exception e) {
            System.out.println("Exception:" + e.getMessage());
        }
    }
}

// MyServer.java -- single-client server thread for the menu protocol above.
// FIXES over the original listing:
//  * "Try" -> "try" (twice): Java is case-sensitive, the original did not compile.
//  * The string-operation submenu loop now exits when the client sends choice 4;
//    previously the server stayed in the inner loop forever and misread the
//    client's next top-level menu choice as a submenu choice.
import java.net.*;
import java.io.*;

public class MyServer extends Thread {
    DataInputStream in;
    DataOutputStream out;
    Socket clientSocket;
    int n, ch, ch1, f, l;
    String st, st1, st2;

    // Accepts exactly one client on port 2001, wires up the streams and starts
    // the request-handling thread (run()).
    public MyServer() {
        try { // was "Try" -- compile error in the original
            ServerSocket listenSocket = new ServerSocket(2001);
            System.out.println("\nServer is Running");
            clientSocket = listenSocket.accept();
            System.out.println("\n Client is Connected");
            in = new DataInputStream(clientSocket.getInputStream());
            out = new DataOutputStream(clientSocket.getOutputStream());
            this.start();
        } catch (IOException e) {
            System.out.println("Connection:" + e.getMessage());
        }
    }

    // Request loop: reads a menu choice, then the operands, and writes the
    // result back. Ends via the catch block when the client disconnects.
    public void run() {
        try { // was "Try" -- compile error in the original
            while (true) {
                ch = Integer.parseInt(in.readUTF());
                switch (ch) {
                    case 1: { // factorial
                        int fact = 1;
                        n = Integer.parseInt(in.readUTF());
                        for (int i = 1; i <= n; i++) {
                            fact = fact * i;
                        }
                        out.writeUTF(Integer.toString(fact));
                        break;
                    }
                    case 2: { // sum of decimal digits
                        int sum = 0, rem;
                        n = Integer.parseInt(in.readUTF());
                        while (n != 0) {
                            rem = n % 10;
                            n = n / 10;
                            sum = sum + rem;
                        }
                        out.writeUTF(Integer.toString(sum));
                        break;
                    }
                    case 3: // string-operation submenu
                        while (true) {
                            ch1 = Integer.parseInt(in.readUTF());
                            if (ch1 == 4) {
                                break; // client left the submenu (bug fix, see header)
                            }
                            switch (ch1) {
                                case 1: // concatenation
                                    st1 = in.readUTF();
                                    st2 = in.readUTF();
                                    st = st1.concat(st2);
                                    out.writeUTF(st);
                                    break;
                                case 2: // substring [f, l)
                                    st1 = in.readUTF();
                                    f = Integer.parseInt(in.readUTF());
                                    l = Integer.parseInt(in.readUTF());
                                    st = st1.substring(f, l);
                                    out.writeUTF(st);
                                    st = null;
                                    break;
                                case 3: // palindrome: compareTo against reverse, 0 = palindrome
                                    st = in.readUTF();
                                    st1 = new StringBuffer(st).reverse().toString();
                                    n = st.compareTo(st1);
                                    out.writeUTF(Integer.toString(n));
                                    break;
                            }
                        }
                        break;
                }
            }
        } catch (Exception e) {
            System.out.println("Exception :" + e.getMessage());
        }
    }

    public static void main(String args[]) {
        try {
            MyServer s = new MyServer();
        } catch (Exception e) {
            System.out.println("Listen socket:" + e.getMessage());
            e.printStackTrace();
        }
    }
}
SL- V Class:BE IT Exp 2:
Aim: Design a distributed application using RPC for remote computation where client submits an integer value to the server and server calculates factorial and returns the result to the client program.
Steps:
# Create the IDL
# Open terminal
sudo apt-get update
sudo apt-get install rpcbind
mkdir exp2
cd exp2
gedit fact.x # add following code in it
struct intpair { int a; }; program FACT_PROG { version FACT_VERS { int FACT(intpair) = 1; } = 1; } = 0x23451111;
# save and exit the file
rpcgen -a -C fact.x
gedit Makefile.fact # find the following line in the file CFLAGS += -g and change it to: CFLAGS += -g -DRPC_SVC_FG
# find the following line in the same file RPCGENFLAGS = and change it to: RPCGENFLAGS = -C
# save and exit the file
gedit fact_client.c # we will make some changes in this file (changes are highlighted)
#include "fact.h"
void fact_prog_1(char *host,int a) { CLIENT *clnt; int *result_1; intpair fact_1_arg;
#ifndefDEBUG
clnt = clnt_create (host, FACT_PROG, FACT_VERS, "udp"); if (clnt == NULL) { clnt_pcreateerror (host); exit (1); } #endif /* DEBUG */
fact_1_arg.a=a;
result_1 = fact_1(&fact_1_arg, clnt); if (result_1 == (int *) NULL) { clnt_perror (clnt, "call failed");
}
else { printf("Factorial=%d",*result_1); }
#ifndefDEBUG
clnt_destroy (clnt);#endif /* DEBUG */ }
int main (int argc, char *argv[]) {
char *host; int a,ch; if (argc < 2) { printf ("usage: %s server_host\n", argv[0]); exit (1); } host = argv[1];
Department of Information Technology Software Laboratory-V
--------------------------------------------------------------------------------------------------------------------- Assignment No: 3
---------------------------------------------------------------------------------------------------------------------
Title of the Assignment: Design a distributed application using RMI for remote computation where client submits two numbers to the server and server returns the addition of the given numbers. ---------------------------------------------------------------------------------------------------------------------
// AddClient.java -- RMI client: looks up the remote AddServer stub in the
// local registry and asks it to add the two command-line integers.
import java.rmi.*;

public class AddClient {
    public static void main(String[] args) {
        try {
            String addServerURL = "rmi://localhost/AddServer";
            AddServerIntf addServerInt = (AddServerIntf) Naming.lookup(addServerURL);
            System.out.println("The first number is: " + args[0]);
            int n1 = Integer.parseInt(args[0]);
            System.out.println("The second number is: " + args[1]);
            int n2 = Integer.parseInt(args[1]);
            // The add() call executes on the server and returns the sum.
            System.out.println("The Addition is: " + addServerInt.add(n1, n2));
        } catch (Exception e) {
            System.out.println("Exception: " + e);
        }
    }
}

// AddServer.java -- binds one AddServerImpl instance under the name
// "AddServer" in the RMI registry so clients can look it up.
import java.rmi.*;
import java.net.*;

public class AddServer {
    public static void main(String args[]) {
        try {
            AddServerImpl addServerImpl = new AddServerImpl();
            Naming.rebind("AddServer", addServerImpl);
        } catch (Exception e) {
            System.out.println("Exception:" + e);
        }
    }
}

// AddServerImpl.java -- remote object implementing the addition service.
import java.rmi.*;
import java.rmi.server.*;

public class AddServerImpl extends UnicastRemoteObject implements AddServerIntf {
    // Exporting via the UnicastRemoteObject superclass constructor may fail.
    public AddServerImpl() throws RemoteException {
    }

    // Returns the sum of the two operands supplied by the client.
    public int add(int n1, int n2) throws RemoteException {
        return n1 + n2;
    }
}

// AddServerIntf.java -- the remote interface clients program against.
import java.rmi.*;

public interface AddServerIntf extends Remote {
    int add(int n1, int n2) throws RemoteException;
}
SL – V Exp 3: Aim: Design a distributed application using Message Passing Interface (MPI) for remote computation where client submits a string to the server and server returns the reverse of it to the client. Steps: # open terminal sudo apt-get update sudo apt-get install libopenmpi-dev mkdir exp3 cd exp3 gedit server.c # add following code in it #include <stdlib.h> #include <stdio.h> #include "mpi.h" #include<string.h> int main(int argc, char **argv) { MPI_Comm client; MPI_Status status; char port_name[MPI_MAX_PORT_NAME],str[50],ch,temp; int size, again, i,j;
/* server.c: single-process MPI server. Message-tag protocol: tag 2 carries one
 * character of the incoming string, tag 1 marks end-of-string and requests the
 * reversal, tag 0 asks the server to shut down. */
MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &size); if (size != 1) { fprintf(stderr, "Server too big"); exit(EXIT_FAILURE);
} MPI_Open_port(MPI_INFO_NULL, port_name); printf("Server available at port: %s\n", port_name);
/* Outer loop: accept one client connection per iteration; i indexes into str
 * while characters arrive. */
i=0; while (1) {
MPI_Comm_accept(port_name, MPI_INFO_NULL, 0, MPI_COMM_WORLD, &client); again = 1; while (again) { MPI_Recv(&ch, 1, MPI_CHAR, MPI_ANY_SOURCE, MPI_ANY_TAG, client, &status); switch (status.MPI_TAG) {
/* tag 0: terminate the server cleanly. */
case 0: MPI_Comm_free(&client); MPI_Close_port(port_name); MPI_Finalize(); return 0;
/* tag 1: full string received -- reverse str in place, echo it, and stream it
 * back one character at a time. */
case 1:
printf("\nReceived String: %s\n",str);
// reverse the string i = 0;
j = strlen(str) - 1;
while (i < j) { temp = str[i]; str[i] = str[j]; str[j] = temp; i++; j--;
} printf("\nReversed string is : %s\n",str);
// send the reversed string to client (character by character)
for (i = 0; i < strlen(str); i++) {
ch=str[i];
MPI_Send(&ch, 1, MPI_CHAR, 0, 2, client);
}
/* End-of-string marker for the client: only the tag matters here, the
 * character payload is ignored by the receiver. */
//send tag=1 to indicate end of string MPI_Send(&ch, 1, MPI_CHAR, 0, 1, client);
MPI_Comm_disconnect(&client);
/* Reset per-connection state for the next client. */
again = 0; strcpy(str,""); i=0;
break;
/* tag 2: append one character, keeping str NUL-terminated as it grows.
 * NOTE(review): no bounds check -- an input of 49+ chars overflows char str[50]. */
case 2:
printf("Received character: %c\n", ch); str[i]=ch; i++; // add null character at the end of string
str[i]='\0'; break;
default: /* Unexpected message type */ MPI_Abort(MPI_COMM_WORLD, 1);
} }
} } # save and exit the
file gedit client.c
# add following code in it #include <stdlib.h> #include <stdio.h> #include <string.h> #include "mpi.h" int main( int argc, char **argv ) { MPI_Comm server; MPI_Status status; char port_name[MPI_MAX_PORT_NAME],str[50],ch; int i, tag,again;
/* client.c: connects to the server's port (argv[1]), sends a string one
 * character at a time with tag 2, signals end-of-string with a tag-1 message,
 * then receives the reversed string back (tag 2 per char, tag 1 = done). */
if (argc < 2) { fprintf(stderr, "server port name required.\n"); exit(EXIT_FAILURE);
}
MPI_Init(&argc, &argv); strcpy(port_name, argv[1]); MPI_Comm_connect(port_name, MPI_INFO_NULL, 0, MPI_COMM_WORLD, &server);
/* NOTE(review): scanf("%s") does not bound the read; input of 49+ chars
 * overflows char str[50] -- "%49s" would be safer. */
// accept input string printf("\nEnter the string :\n"); scanf("%s",str);
//send string to server (character by character) for (i = 0; i < strlen(str); i++) {
if(str[i]!='\0')
ch=str[i];
tag=2; MPI_Send(&ch, 1, MPI_CHAR, 0, tag, server);
}
/* Zero-count tag-1 send: only the tag is meaningful -- it tells the server the
 * string is complete and should be reversed. */
// done sending string to the server MPI_Send(&i, 0, MPI_INT, 0, 1, server);
/* str is reused for the reply; the reversed string has the same length, so the
 * NUL left over from the original input still terminates it correctly. */
// Receive the reversed string from server and display it i=0; again=1;
while (again) { MPI_Recv(&ch, 1, MPI_CHAR, MPI_ANY_SOURCE, MPI_ANY_TAG, server, &status); switch (status.MPI_TAG) { case 2:
str[i]=ch; i++; break;
case 1: again=0;
break;
} }
printf("\nReversed string is : %s\n\n",str);
MPI_Comm_disconnect(&server); MPI_Finalize(); return 0;
} # save and exit the file # compile mpicc server.c -o server mpicc client.c -o client # run server mpirun -np 1 ./server # it will display output similar to below (not necessarily the same) Server available at port: 4290510848.0;tcp://192.168.1.101:35820;tcp://192.168.122.1:35820+4290510849.0;tcp://192.168.1 .101:40208;tcp://192.168.122.1:40208:300 # copy the port-string from the terminal output (e.g. the highlighted portion above) # we are going to supply this port-string as a first command line argument to the client
# open another terminal mpirun -np 1 ./client '4290510848.0;tcp://192.168.1.101:35820;tcp://192.168.122.1:35820+4290510849.0;tcp://192.168. 1.101:40208;tcp://192.168.122.1:40208:300' # Don't forget to insert single quotes at the start & end of the port-string. # output : Server Terminal # Ouput: Client terminal ------------------------------------------------------------------------------------------------------------------------
SL-V BE IT EXP 5 Part A [According to new revised assignments] Aim: Design a distributed application using MapReduce under Hadoop for: a) Character counting in a given text file. Steps: First install hadoop (if not installed yet) by, https://sl6it.wordpress.com/2015/12/04/1-study-and-configure-hadoop-for-big-data/ # Download sample.txt file (attached with this post) # Paste sample.txt in your home folder # Open terminal whoami # It will display your user name, we will use it later. # Open eclipse->new java project->project name exp5a->new class-> CharMap # Add following code in that class package exp5a; import java.io.IOException; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.Mapper; public class CharMap extends Mapper<LongWritable, Text, Text, IntWritable> {
// Map phase: emits the pair (c, 1) for every single character c of the input
// line (letters, digits, punctuation alike); the reducer sums the 1s per char.
// key = byte offset of the line in the file (unused); value = the line itself.
public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
String line = value.toString(); char[] carr = line.toCharArray(); for (char c : carr) {
// The println only echoes each character to the task log for debugging.
System.out.println(c); context.write(new Text(String.valueOf(c)), new IntWritable(1));
}
} } # Save the file # It will display some errors, so we are going to import three jar files in our project. # Copy hadoop-mapreduce-client-core-2.7.1.jar from ~/hadoop/share/hadoop/mapreduce directory # In eclipse-> right click on exp5a project- >paste # Right click on pasted hadoop-mapreduce-client-core-2.7.1.jar-> Buid path-> add to buid path #Copy hadoop-common-2.7.1.jar from ~/hadoop/share/hadoop/common directory # In eclipse-> right click on exp5a project- >paste # Right click on pasted hadoop-common-2.7.1.jar-> Buid path-> add to buid path #Copy commons-cli-1.2.jar from ~/hadoop/share/hadoop/common/lib directory # In eclipse-> right click on exp5a project- >paste # Right click on pasted commons-cli-1.2.jar-> Buid path-> add to buid path
# In eclipse->right click on project exp5a->new class-> CharReduce # Add following code in that class package exp5a; import java.io.IOException; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.Reducer; public class CharReduce extends Reducer<Text, IntWritable, Text, IntWritable> {
// Reduce phase: sums the 1s emitted by CharMap for one character (the key).
// result.set() runs on every iteration, but only the final running total is
// ever written out, so the emitted value is the full count.
public void reduce(Text key,Iterable<IntWritable> values,Context context)throws IOException,InterruptedException{
int count = 0; IntWritable result = new IntWritable(); for (IntWritable val : values) {
count +=val.get(); result.set(count);
} context.write(key, result);
} } # Save the file # In eclipse->right click on project exp5a->new class-> CharCount # Add following code in that class package exp5a; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; import org.apache.hadoop.mapreduce.lib.input.TextInputFormat; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat; public class CharCount {
public static void main(String[] args) throws Exception { // TODO Auto-generated method stub Configuration conf = new Configuration(); @SuppressWarnings("deprecation")
Job job = new Job(conf, "Charcount"); job.setJarByClass(CharCount.class); job.setMapperClass(CharMap.class); job.setReducerClass(CharReduce.class); job.setInputFormatClass(TextInputFormat.class); job.setOutputFormatClass(TextOutputFormat.class); job.setMapOutputKeyClass(Text.class); job.setMapOutputValueClass(IntWritable.class); job.setOutputKeyClass(Text.class); job.setOutputValueClass(IntWritable.class); FileInputFormat.addInputPath(job, new Path(args[0])); FileOutputFormat.setOutputPath(job, new Path( args[1])); System.exit(job.waitForCompletion(true) ? 0 : 1);
} } # Save the file
# In eclipse->Right click on project exp5a-> export->java->jar file->next-> select the export destination -> /home/your_user_name/exp5a.jar -> next -> next -> select main class ->browse -> CharCount -> finish # exp5a.jar file will be created in your home folder # Open terminal # Now Start NameNode daemon and DataNode daemon:
~/hadoop/sbin/start-dfs.sh # Make the HDFS directories required to execute
MapReduce jobs ~/hadoop/bin/hdfs dfs -mkdir /user
~/hadoop/bin/hdfs dfs -mkdir /user/your_user_name # Put sample.txt file in hdfs
~/hadoop/bin/hdfs dfs -put ~/sample.txt input_data # Perform MapReduce job
~/hadoop/bin/hadoop jar ~/exp5a.jar input_data output_data # Output
~/hadoop/bin/hdfs dfs -cat output_data/* # Our task is done, so delete the distributed files (input_data &
output_data) ~/hadoop/bin/hdfs dfs -rm -r input_data output_data # Stop haddop
~/hadoop/sbin/stop-dfs.sh jps Reference : Hadoop the definitive guide, O’Reilly Publications, by Tom White ------------------------------------------------------------------------------------------------------------------
SL-V BE IT EXP 5 Part B (According to new revised assignments) Aim: Design a distributed application using MapReduce under Hadoop for: b) Counting no. of occurrences of every word in a given text file. Steps: First install hadoop (if not installed yet) by, https://sl6it.wordpress.com/2015/12/04/1-study-and-configure-hadoop-for-big-data/ # Download sample.txt file (attached with this post) # Paste sample.txt in your home folder # Open terminal whoami # It will display your user name, we will use it later. # Open eclipse->new java project->project name exp5b->new class-> WordCount # Add following code in that class package exp5b; import java.io.IOException; import java.util.StringTokenizer; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.Mapper; import org.apache.hadoop.mapreduce.Reducer; import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; import org.apache.hadoop.util.GenericOptionsParser; public class WordCount {
// Mapper: splits each input line on whitespace (StringTokenizer default) and
// emits (word, 1) for every token. `word` and `one` are reused across calls
// to avoid per-record allocation.
public static class TokenizerMapper extends Mapper<Object, Text, Text, IntWritable>{
private final static IntWritable one = new IntWritable(1); private Text word = new Text();
public void map(Object key, Text value, Context context
) throws IOException, InterruptedException { StringTokenizer itr = new StringTokenizer(value.toString()); while (itr.hasMoreTokens()) {
word.set(itr.nextToken()); context.write(word, one);
} }
}
// Reducer (also registered as the combiner below): sums the counts for each
// word and writes (word, total).
public static class IntSumReducer extends Reducer<Text,IntWritable,Text,IntWritable> {
private IntWritable result = new IntWritable();
public void reduce(Text key, Iterable<IntWritable> values,
Context context ) throws IOException, InterruptedException {
int sum = 0; for (IntWritable val : values) { sum += val.get();
} result.set(sum); context.write(key, result);
} }
// Driver: the last remaining argument is the output path; every earlier one is
// added as an input path. Exits 2 on usage error, 0/1 on job success/failure.
public static void main(String[] args) throws Exception { Configuration conf = new Configuration(); String[] otherArgs = new GenericOptionsParser(conf,
args).getRemainingArgs(); if (otherArgs.length < 2) { System.err.println("Usage: wordcount <in> [<in>...] <out>"); System.exit(2);
} Job job = Job.getInstance(conf, "word count"); job.setJarByClass(WordCount.class); job.setMapperClass(TokenizerMapper.class); job.setCombinerClass(IntSumReducer.class); job.setReducerClass(IntSumReducer.class); job.setOutputKeyClass(Text.class); job.setOutputValueClass(IntWritable.class); for (int i = 0; i < otherArgs.length - 1; ++i) { FileInputFormat.addInputPath(job, new Path(otherArgs[i]));
} FileOutputFormat.setOutputPath(job, new Path(otherArgs[otherArgs.length - 1]));
System.exit(job.waitForCompletion(true) ? 0 : 1); }
} # Save the file # It will display some errors, so we are going to import three jar files in our project. # Copy hadoop-mapreduce-client-core-2.7.1.jar from ~/hadoop/share/hadoop/mapreduce directory # In eclipse-> right click on exp5b project- >paste # Right click on pasted hadoop-mapreduce-client-core-2.7.1.jar-> Buid path-> add to buid path #Copy hadoop-common-2.7.1.jar from ~/hadoop/share/hadoop/common directory # In eclipse-> right click on exp5b project- >paste # Right click on pasted hadoop-common-2.7.1.jar-> Buid path-> add to buid path #Copy commons-cli-1.2.jar from ~/hadoop/share/hadoop/common/lib directory # In eclipse-> right click on exp5b project- >paste # Right click on pasted commons-cli-1.2.jar-> Buid path-> add to buid path # In eclipse->Right click on project exp5b-> export->java->jar file->next-> select the export destination -> /home/your_user_name/exp5b.jar -> next -> next -> select main class ->browse -> WordCount -> finish # exp5b.jar file will be created in your home folder # Open terminal
# Now Start NameNode daemon and DataNode
daemon: ~/hadoop/sbin/start-dfs.sh
# Make the HDFS directories required to execute
MapReduce jobs ~/hadoop/bin/hdfs dfs -mkdir /user
~/hadoop/bin/hdfs dfs -mkdir /user/your_user_name # Put sample.txt file in hdfs
~/hadoop/bin/hdfs dfs -put ~/sample.txt input_data # Perform MapReduce job
~/hadoop/bin/hadoop jar ~/exp5b.jar input_data output_data # Output
~/hadoop/bin/hdfs dfs -cat output_data/* # Our task is done, so delete the distributed files (input_data &
output_data) ~/hadoop/bin/hdfs dfs -rm -r input_data output_data # Stop haddop
~/hadoop/sbin/stop-dfs.sh jps Reference : Hadoop the definitive guide, O’Reilly Publications, by Tom White ------------------------------------------------------------------------------------------------------------------
SL-V BE IT EXP 6 (According to new revised assignments) Aim: Design a distributed application using MapReduce under Hadoop for finding maximum number in first and second columns in every line of a given text file. Steps: First install hadoop (if not installed yet) by, https://sl6it.wordpress.com/2015/12/04/1-study-and-configure-hadoop-for-big-data/ # Download sample6.txt file (attached with this post) # Paste sample6.txt in your home folder # Open terminal whoami # It will display your user name, we will use it later. # Open eclipse->new java project->project name exp6->new class-> MaxMap # Add following code in that class package exp6; import java.io.IOException; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.Mapper; public class MaxMap extends Mapper<LongWritable, Text, Text, IntWritable> { int values[] = new int[10000]; int
values1[] = new int[10000]; String word[] ; int maxValue = 0,linenum =0;
// Map (used in a map-only job, reducer count is 0): parses the first two
// comma-separated integers of each line, emits key "lineNo<TAB>a<TAB>b" with
// value max(a, b). The printlns only echo parsed input to the task log.
// NOTE(review): linenum/values/values1 are per-mapper-instance state, so line
// numbering restarts for every input split -- confirm single-split inputs.
public void map(LongWritable key, Text value, Context context) throws
IOException, InterruptedException { String words = value.toString();
System.out.println(words); word = words.split(","); for (int i = 0; i < 2; i++) { System.out.println(word[i]); values[i] =
Integer.parseInt(word[i]); values1[i] = Integer.parseInt(word[i]);
} if(values1[0] < values1[1])
// Swap so values1[0] holds the larger of the two columns.
{ int temp =values1[0]; values1[0] = values1[1]; values1[1] = temp;
} maxValue = values1[0]; String text = ""+(linenum+1)+"\t"+values[0]+"\t"+values[1]+""; if(linenum>=0) { context.write(new Text(text), new IntWritable(maxValue)); } linenum++;
} } # Save the file
# It will display some errors, so we are going to import three jar files in our project. # Copy hadoop-mapreduce-client-core-2.7.1.jar from ~/hadoop/share/hadoop/mapreduce directory # In eclipse-> right click on exp6 project- >paste # Right click on pasted hadoop-mapreduce-client-core-2.7.1.jar-> Buid path-> add to buid path #Copy hadoop-common-2.7.1.jar from ~/hadoop/share/hadoop/common directory # In eclipse-> right click on exp6 project- >paste # Right click on pasted hadoop-common-2.7.1.jar-> Buid path-> add to buid path #Copy commons-cli-1.2.jar from ~/hadoop/share/hadoop/common/lib directory # In eclipse-> right click on exp6 project- >paste # Right click on pasted commons-cli-1.2.jar-> Buid path-> add to buid path # In eclipse->right click on project exp6->new class-> MaxCount # Add following code in that class (replace your_user_name by your own username ) package exp6; import java.io.BufferedReader; import java.io.File; import java.io.FileReader; import java.util.ArrayList; import java.util.List; import java.util.Scanner; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; public class MaxCount extends Configured implements Tool {
public static void main(String[] args) throws Exception { int res = ToolRunner.run(new Configuration(), new MaxCount() , args);
System.exit(res); } @Override public int run(String[] args) throws Exception {
Configuration conf = getConf(); @SuppressWarnings("deprecation")
Job job = new Job(conf,"MaxCount"); job.setJarByClass(MaxCount.class); job.setMapperClass(MaxMap.class); job.setNumReduceTasks(0); job.setMapOutputKeyClass(Text.class); job.setMapOutputValueClass(IntWritable.class);
job.setInputFormatClass(org.apache.hadoop.mapreduce.lib.input.TextInputFormat.class);
job.setOutputFormatClass(TextOutputFormat.class); FileInputFormat.setInputPaths(job, new Path(args[0]));
FileOutputFormat.setOutputPath(job, new Path(args[1])); Path outputpath = new Path(args[1]); outputpath.getFileSystem(conf).delete(outputpath, true);
job.waitForCompletion(true);
FileSystem fs = FileSystem.get(conf); FileStatus[] status = fs.listStatus(new Path(args[1]));
//copy hdfs output file to local file for(int i=0;i<status.length;i++){ System.out.println(status[i].getPath()); fs.copyToLocalFile(false, status[i].getPath(), new
Path("/home/your_user_name/"+args[1])); }
System.out.println("\nLine\tFirst\tSecond\tMaximum"); System.out.println("no \tColumn\tColumn\n");
//display contents of local file BufferedReader br = new BufferedReader(new
FileReader("/home/your_user_name/"+args[1])); String line = null; while ((line = br.readLine()) != null) { System.out.println(line); } br.close();
Scanner s = new Scanner(new File("/home/your_user_name/"+args[1])); List<Integer> max_values = new ArrayList<Integer>();
while (s.hasNext()) { s.next(); s.next(); s.next(); max_values.add(Integer.parseInt(s.next())); } int maximum=0;
for (int max: max_values) {
if(max>maximum) { maximum=max;
} }
System.out.println("\nOverall Maximum: "+maximum+"\n"); s.close();
return 0; }
} # Save the file # In eclipse->Right click on project exp6-> export->java->jar file->next-> select the export destination -> /home/your_user_name/exp6.jar -> next -> next -> select main class ->browse -> MaxCount -> finish # exp6.jar file will be created in your home folder
# Open terminal # Now Start NameNode daemon and DataNode
daemon: ~/hadoop/sbin/start-dfs.sh # Make the HDFS directories required to execute
MapReduce jobs ~/hadoop/bin/hdfs dfs -mkdir /user
~/hadoop/bin/hdfs dfs -mkdir /user/your_user_name # Put sample6.txt file in hdfs
~/hadoop/bin/hdfs dfs -put ~/sample6.txt input_data # Perform MapReduce job
~/hadoop/bin/hadoop jar ~/exp6.jar input_data output_data # Output
# Our task is done, so delete the distributed files (input_data & output_data) ~/hadoop/bin/hdfs dfs -rm -r input_data output_data
# Also delete local output file rm -r ~/output_data
# Stop hadoop
~/hadoop/sbin/stop-dfs.sh jps Reference : Hadoop the definitive guide, O’Reilly Publications, by Tom White ------------------------------------------------------------------------------------------------------------------
EL-V BE IT EXP 7 Aim: Design and develop a distributed application to find the coolest/hottest year from the available weather data. Use weather data from the Internet and process it using MapReduce. Steps: First install hadoop (if not installed yet) by, https://sl6it.wordpress.com/2015/12/04/1-study-and-configure-hadoop-for-big-data/ # Download dataset.zip file (attached with this post) # It contains NCDC weather data from year 1901 to year 1920. # Copy and extract dataset.zip in your home folder # Open terminal whoami # It will display your user name, we will use it later. # Open eclipse->new java project->project name exp7->new class->MaxTemperatureMapper # Add following code in that class package exp7; import java.io.IOException; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.Mapper; public class MaxTemperatureMapper extends Mapper<LongWritable, Text, Text, IntWritable>
// NCDC fixed-width record layout used below: year at columns 15-19,
// temperature sign at column 87, temperature digits at 88-92 (87-92 when
// there is no explicit '+'), quality code at column 92. 9999 marks a missing
// reading; the value is in tenths of a degree (divided by 10 by the driver).
{ private static final int MISSING= 9999; @Override
public void map(LongWritable key,Text value, Context context) throws IOException,InterruptedException {
String line = value.toString(); String year = line.substring(15, 19); int airTemperature; if (line.charAt(87)=='+')
{ airTemperature = Integer.parseInt(line.substring(88, 92));
} else
{ airTemperature = Integer.parseInt(line.substring(87, 92));
// Emit (year, temperature) only for present readings with an accepted
// quality code (0, 1, 4, 5 or 9).
} String quality = line.substring(92, 93); if (airTemperature != MISSING && quality.matches("[01459]"))
{ context.write(new Text(year), new IntWritable(airTemperature));
} }
} # Save the file
# It will display some errors, so we are going to import two jar files in our project. # Copy hadoop-mapreduce-client-core-2.7.1.jar from ~/hadoop/share/hadoop/mapreduce directory # In eclipse-> right click on exp7 project- >paste # Right click on pasted hadoop-mapreduce-client-core-2.7.1.jar-> Buid path-> add to buid path #Copy hadoop-common-2.7.1.jar from ~/hadoop/share/hadoop/common directory # In eclipse-> right click on exp7 project- >paste # Right click on pasted hadoop-common-2.7.1.jar-> Buid path-> add to buid path # Right click on project exp7->new class-> MaxTemperatureReducer # Add following code in that class package exp7; import java.io.IOException; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.Reducer; public class MaxTemperatureReducer extends Reducer<Text,IntWritable, Text, IntWritable> {
// Reduce: emits (year, max temperature) -- the largest reading observed for
// the key year across all mapper outputs. Starts from Integer.MIN_VALUE so
// any real reading (including negatives) wins.
@Override public void reduce(Text key, Iterable<IntWritable> values, Context
context) throws IOException, InterruptedException {
int maxValue = Integer.MIN_VALUE; for (IntWritable value : values)
{ maxValue = Math.max(maxValue, value.get());
} context.write(key, new IntWritable(maxValue));
} } # Save the file # Right click on project exp7->new class-> MaxTemperature # Add following code in that class (replace your_user_name by your own username) # hdfs port number here is 1234, replace it with your port no (if different). package exp7; import java.io.BufferedReader; import java.io.File; import java.io.FileReader; import java.util.ArrayList; import java.util.List; import java.util.Scanner; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public class MaxTemperature { public static void main(String[] args) throws Exception
{ if (args.length != 2) {
System.err.println("Usage:MaxTemperature <input path> <output path>"); System.exit(-1);
} @SuppressWarnings("deprecation") Job job = new Job(); job.setJarByClass(MaxTemperature.class); job.setJobName("Max temperature"); FileInputFormat.addInputPath(job, new Path(args[0])); FileOutputFormat.setOutputPath(job, new Path(args[1])); job.setMapperClass(MaxTemperatureMapper.class); job.setReducerClass(MaxTemperatureReducer.class); job.setOutputKeyClass(Text.class); job.setOutputValueClass(IntWritable.class); job.waitForCompletion(true);
Configuration conf = new Configuration();
conf.set("fs.defaultFS", "hdfs://localhost:1234/user/your_user_name/"); FileSystem fs = FileSystem.get(conf); FileStatus[] status = fs.listStatus(new Path(args[1]));
//copy hdfs output file to local folder
for(int i=0;i<status.length;i++){ System.out.println(status[i].getPath());
fs.copyToLocalFile(false, status[i].getPath(), new Path("/home/your_user_name/"+args[1])); } System.out.println("\nYear\tTemperature\n"); //display contents of local file
BufferedReader br = new BufferedReader(new FileReader("/home/your_user_name/"+args[1]));
String line = null; while ((line = br.readLine()) != null) { System.out.println(line);
} br.close(); Scanner s = new Scanner(new File("/home/your_user_name/"+args[1])); List<Integer> temps = new ArrayList<Integer>(); List<String> years = new ArrayList<String>();
while (s.hasNext()) { years.add(s.next());
temps.add(Integer.parseInt(s.next())); }
int max_temp=0,min_temp =999,i=0,j=0; String hottest_year="", coolest_year=""; for (int temp: temps)
{if(temp>max_temp) { max_temp=temp;
hottest_year=years.get(i); } i++;
}
float max_temp1=max_temp; System.out.println("Hottest Year:"+hottest_year);
System.out.println("\tTemperature:"+max_temp1/10+" Degree Celcius");
for (int temp: temps) {
if(temp<min_temp) {
min_temp=temp; coolest_year=years.get(j);
} j++;
} float min_temp1=min_temp ;
System.out.println("Coolest Year:"+coolest_year); System.out.println("\tTemperature:"+min_temp1/10+" Degree Celcius");
s.close(); }
} # Save the file # In eclipse->Right click on project exp7-> export->java->jar file->next-> select the export destination -> /home/your_user_name/exp7.jar -> next -> next -> select main class ->browse -> MaxTemperature -> finish # exp7.jar file will be created in your home folder # Open terminal # Now Start NameNode daemon and DataNode daemon:
~/hadoop/sbin/start-dfs.sh # Make the HDFS directories required to execute MapReduce jobs (if not
already done) ~/hadoop/bin/hdfs dfs -mkdir /user
~/hadoop/bin/hdfs dfs -mkdir /user/your_user_name # Put NCDC weather dataset in hdfs
~/hadoop/bin/hdfs dfs -put ~/dataset input_dataset # Perform MapReduce job
~/hadoop/bin/hadoop jar ~/exp7.jar input_dataset output_dataset # Output
# Stop hadoop
~/hadoop/sbin/stop-dfs.sh jps Reference : Hadoop the definitive guide, O’Reilly Publications, by Tom White ------------------------------------------------------------------------------------------------------------------