Implementing the Naive Bayes Algorithm with MapReduce

2024-06-20 18:18

This article walks through an implementation of the Naive Bayes classification algorithm on Hadoop MapReduce. The program runs two jobs: a training job that counts class frequencies and class/attribute-value co-occurrence frequencies over NBayes.train, and a testing job that uses those counts to assign a class to every sample in NBayes.test. I hope it offers a useful reference for developers facing similar problems.

The data files and code are as follows:

NBayes.conf (first line: the number of classes followed by the class names; second line: the number of attributes followed by attribute-name/value-range pairs):

4 cl1 cl2 cl3 cl4
3 p1 12 p2 16 p3 17

NBayes.train (one training sample per line: the class label followed by its attribute values; note that cl5 appears below but is not declared in NBayes.conf, so its counts are never consulted during testing):

cl1 5 6 7
cl2 3 8 4
cl1 2 5 2
cl3 7 8 7
cl4 3 8 2
cl4 9 2 7
cl2 1 8 5
cl5 2 9 4
cl3 10 3 4
cl1 4 5 6
cl3 4 6 7

NBayes.test (one test sample per line: a sample id followed by its attribute values):

1 5 6 7
2 1 8 5
3 2 9 4
4 10 3 4
5 4 5 6
6 3 8 4
7 2 5 2
8 7 8 7
9 3 8 2
10 9 2 7
11 4 6 7

package naivebayes;

import java.util.ArrayList;
import java.util.Scanner;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class NaiveBayesConf {
    public int dimen;          // number of attributes
    public int class_num;      // number of classes
    public ArrayList<String> classNames;
    public ArrayList<String> proNames;    // attribute (property) names
    public ArrayList<Integer> proRanges;  // value range of each attribute

    public NaiveBayesConf() {
        dimen = class_num = 0;
        classNames = new ArrayList<String>();
        proNames = new ArrayList<String>();
        proRanges = new ArrayList<Integer>();
    }

    public void ReadNaiveBayesConf(String file, Configuration conf)
            throws Exception {
        Path conf_path = new Path(file);
        FileSystem hdfs = conf_path.getFileSystem(conf);
        FSDataInputStream fsdt = hdfs.open(conf_path);
        Scanner scan = new Scanner(fsdt);

        // First line: class count followed by the class names.
        String str = scan.nextLine();
        String[] vals = str.split(" ");
        class_num = Integer.parseInt(vals[0]);
        for (int i = 1; i < vals.length; i++) {
            classNames.add(vals[i]);
        }

        // Second line: attribute count followed by (name, range) pairs.
        str = scan.nextLine();
        vals = str.split(" ");
        dimen = Integer.parseInt(vals[0]);
        for (int i = 1; i < vals.length; i += 2) {
            proNames.add(vals[i]);
            proRanges.add(Integer.valueOf(vals[i + 1]));
        }

        scan.close();
        fsdt.close();
    }
}
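
To see what the parser produces, here is a minimal local smoke test. This is my own sketch, not part of the original post: it assumes the Hadoop jars are on the classpath, that NBayes.conf sits in the working directory, and that no cluster configuration overrides fs.defaultFS, so a default Configuration resolves the Path against the local file system.

package naivebayes;

import org.apache.hadoop.conf.Configuration;

public class NaiveBayesConfDemo {
    public static void main(String[] args) throws Exception {
        NaiveBayesConf nbc = new NaiveBayesConf();
        nbc.ReadNaiveBayesConf("NBayes.conf", new Configuration());
        // For the sample file above this prints 4 and [cl1, cl2, cl3, cl4],
        System.out.println(nbc.class_num + " " + nbc.classNames);
        // ...then 3 with [p1, p2, p3] and ranges [12, 16, 17].
        System.out.println(nbc.dimen + " " + nbc.proNames + " " + nbc.proRanges);
    }
}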

package naivebayes;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class NaiveBayesMain {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf, args)
                .getRemainingArgs();
        FileSystem fs = FileSystem.get(conf);
        Path path_train, path_temp, path_test, path_out;
        if (otherArgs.length != 5) {
            System.err
                    .println("Usage: NaiveBayesMain <dfs_path> <conf> <train> <test> <out>");
            System.exit(2);
        }

        conf.set("conf", otherArgs[0] + "/" + otherArgs[1]);
        conf.set("train", otherArgs[0] + "/" + otherArgs[2]);
        conf.set("test", otherArgs[0] + "/" + otherArgs[3]);
        conf.set("output", otherArgs[0] + "/" + otherArgs[4]);

        // Upload the local conf, train and test files to HDFS first.
        put2HDFS(otherArgs[1], otherArgs[0] + "/" + otherArgs[1], conf);
        put2HDFS(otherArgs[2], otherArgs[0] + "/" + otherArgs[2], conf);
        put2HDFS(otherArgs[3], otherArgs[0] + "/" + otherArgs[3], conf);

        path_train = new Path(otherArgs[0] + "/" + otherArgs[2]);
        path_temp = new Path(otherArgs[0] + "/" + otherArgs[2] + ".train1");
        path_test = new Path(otherArgs[0] + "/" + otherArgs[3]);
        path_out = new Path(otherArgs[0] + "/" + otherArgs[4]);

        {
            // Job 1: count class and attribute-value frequencies.
            Job job_train = new Job(conf, "naive bayes training");
            job_train.setJarByClass(NaiveBayesMain.class);
            job_train.setMapperClass(NaiveBayesTrain.TrainMapper.class);
            job_train.setCombinerClass(NaiveBayesTrain.TrainReducer.class);
            job_train.setReducerClass(NaiveBayesTrain.TrainReducer.class);
            job_train.setOutputKeyClass(Text.class);
            job_train.setOutputValueClass(IntWritable.class);

            FileInputFormat.setInputPaths(job_train, path_train);
            if (fs.exists(path_temp))
                fs.delete(path_temp, true);
            FileOutputFormat.setOutputPath(job_train, path_temp);
            if (!job_train.waitForCompletion(true))
                System.exit(1);

            conf.set("train_result", otherArgs[0] + "/" + otherArgs[2]
                    + ".train1");
        }
        {
            // Job 2: classify the test samples using the counts from job 1.
            Job job_test = new Job(conf, "naive bayes testing");
            job_test.setJarByClass(NaiveBayesTest.class);
            job_test.setMapperClass(NaiveBayesTest.TestMapper.class);
            job_test.setOutputKeyClass(Text.class);
            job_test.setOutputValueClass(Text.class);

            FileInputFormat.setInputPaths(job_test, path_test);
            if (fs.exists(path_out))
                fs.delete(path_out, true);
            FileOutputFormat.setOutputPath(job_test, path_out);
            if (!job_test.waitForCompletion(true))
                System.exit(1);
            fs.delete(path_temp, true);
        }

        // getFromHDFS(otherArgs[0] + "/" + otherArgs[4], ".", conf);

        fs.close();
        System.exit(0);
    }

    public static void put2HDFS(String src, String dst, Configuration conf)
            throws Exception {
        Path dstPath = new Path(dst);
        FileSystem hdfs = dstPath.getFileSystem(conf);
        hdfs.copyFromLocalFile(false, true, new Path(src), dstPath);
    }

    public static void getFromHDFS(String src, String dst, Configuration conf)
            throws Exception {
        Path dstPath = new Path(dst);
        FileSystem lfs = dstPath.getFileSystem(conf);
        String temp[] = src.split("/");
        Path ptemp = new Path(temp[temp.length - 1]);
        // Remove any stale local copy before fetching.
        if (lfs.exists(ptemp))
            lfs.delete(ptemp, true);
        lfs.copyToLocalFile(true, new Path(src), dstPath);
    }
}
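
For reference, a typical invocation might look like the line below; the jar name and the HDFS base directory are assumptions on my part, while the five arguments match the usage string above. Note that put2HDFS copies the three named files from the local working directory into the HDFS base directory before the jobs run, so NBayes.conf, NBayes.train and NBayes.test must exist locally.

hadoop jar naivebayes.jar naivebayes.NaiveBayesMain /user/hadoop/nbayes NBayes.conf NBayes.train NBayes.test output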

package naivebayes;

import java.io.IOException;
import java.util.Scanner;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;

public class NaiveBayesTrain {
    public static class TrainMapper extends
            Mapper<Object, Text, Text, IntWritable> {
        public NaiveBayesConf nBConf;
        private final static IntWritable one = new IntWritable(1);
        private Text word;

        public void setup(Context context) {
            try {
                nBConf = new NaiveBayesConf();
                Configuration conf = context.getConfiguration();
                nBConf.ReadNaiveBayesConf(conf.get("conf"), conf);
            } catch (Exception ex) {
                ex.printStackTrace();
                System.exit(1);
            }
        }

        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            Scanner scan = new Scanner(value.toString());
            String str, vals[], temp;
            int i;
            word = new Text();
            while (scan.hasNextLine()) {
                str = scan.nextLine();
                vals = str.split(" ");
                // Emit (class, 1) for the class frequency f(y).
                word.set(vals[0]);
                context.write(word, one);
                // Emit (class#attribute#value, 1) for each joint
                // frequency f(y, xj).
                for (i = 1; i < vals.length; i++) {
                    word = new Text();
                    temp = vals[0] + "#" + nBConf.proNames.get(i - 1);
                    temp += "#" + vals[i];
                    word.set(temp);
                    context.write(word, one);
                }
            }
        }
    }

    public static class TrainReducer extends
            Reducer<Text, IntWritable, Text, IntWritable> {
        private IntWritable result = new IntWritable();

        public void reduce(Text key, Iterable<IntWritable> values,
                Context context) throws IOException, InterruptedException {
            // Sum the 1s emitted by the mappers; this class doubles
            // as the combiner.
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            result.set(sum);
            context.write(key, result);
        }
    }
}
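
So for the training line "cl1 5 6 7", the mapper emits (cl1, 1), (cl1#p1#5, 1), (cl1#p2#6, 1) and (cl1#p3#7, 1), and the reducer sums the 1s per key. The stand-alone sketch below is my own illustration of that counting logic, replicated in plain Java (class and variable names are my inventions) on the first three training lines:

import java.util.HashMap;
import java.util.Map;

public class TrainCountDemo {
    public static void main(String[] args) {
        String[] lines = { "cl1 5 6 7", "cl2 3 8 4", "cl1 2 5 2" };
        String[] proNames = { "p1", "p2", "p3" };
        Map<String, Integer> freq = new HashMap<String, Integer>();
        for (String line : lines) {
            String[] vals = line.split(" ");
            // Class frequency f(y).
            freq.merge(vals[0], 1, Integer::sum);
            // Joint frequency f(y, xj), keyed as class#attribute#value.
            for (int i = 1; i < vals.length; i++) {
                freq.merge(vals[0] + "#" + proNames[i - 1] + "#" + vals[i],
                        1, Integer::sum);
            }
        }
        // Prints counts such as cl1=2, cl1#p1#5=1, cl2#p2#8=1, ...
        System.out.println(freq);
    }
}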


package naivebayes;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.HashMap;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class NaiveBayesTrainData {
    public HashMap<String, Integer> freq;

    public NaiveBayesTrainData() {
        freq = new HashMap<String, Integer>();
    }

    public void getData(String file, Configuration conf) throws IOException {
        Path data_path = new Path(file);
        FileSystem hdfs = data_path.getFileSystem(conf);
        FileStatus[] status = hdfs.listStatus(data_path);

        // Read every part-* file the training job wrote and load the
        // tab-separated (key, count) pairs into the freq map.
        for (int i = 0; i < status.length; i++) {
            Path file_path = status[i].getPath();
            if (status[i].isDir())
                continue;
            String line = file_path.toString();
            String[] temp = line.split("/");
            if (!temp[temp.length - 1].startsWith("part-"))
                continue;
            FSDataInputStream fin = hdfs.open(file_path);
            InputStreamReader inr = new InputStreamReader(fin);
            BufferedReader bfr = new BufferedReader(inr);
            while ((line = bfr.readLine()) != null) {
                String res[] = line.split("\t");
                freq.put(res[0], Integer.valueOf(res[1]));
            }
            bfr.close();
            inr.close();
            fin.close();
        }
    }
}
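
With the default TextOutputFormat, each part-r-* file holds one tab-separated key/count pair per line, sorted lexicographically by key. For the sample training set above, the file would begin with lines like these (counts computed by hand from NBayes.train):

cl1	3
cl1#p1#2	1
cl1#p1#4	1
cl1#p1#5	1
cl1#p2#5	2
...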


package naivebayes;

import java.io.IOException;
import java.util.Scanner;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class NaiveBayesTest {
    public static class TestMapper extends Mapper<Object, Text, Text, Text> {
        public NaiveBayesConf nBConf;
        public NaiveBayesTrainData nBTData;

        public void setup(Context context) {
            try {
                Configuration conf = context.getConfiguration();

                nBConf = new NaiveBayesConf();
                nBConf.ReadNaiveBayesConf(conf.get("conf"), conf);
                nBTData = new NaiveBayesTrainData();
                nBTData.getData(conf.get("train_result"), conf);
            } catch (Exception ex) {
                ex.printStackTrace();
                System.exit(1);
            }
        }

        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            Scanner scan = new Scanner(value.toString());
            String str, vals[], temp;
            int i, j, fxyi, fyi, fyij, maxf, idx;

            while (scan.hasNextLine()) {
                str = scan.nextLine();
                vals = str.split(" ");
                // Pick the class maximizing f(y) * product over j of f(y, xj).
                // Scores are non-negative, so the first class always replaces
                // the initial maxf.
                maxf = -1;
                idx = -1;
                for (i = 0; i < nBConf.class_num; i++) {
                    fxyi = 1;
                    String cl = nBConf.classNames.get(i);
                    Integer integer = nBTData.freq.get(cl);
                    fyi = (integer == null) ? 0 : integer.intValue();
                    for (j = 1; j < vals.length; j++) {
                        temp = cl + "#" + nBConf.proNames.get(j - 1) + "#"
                                + vals[j];
                        integer = nBTData.freq.get(temp);
                        fyij = (integer == null) ? 0 : integer.intValue();
                        fxyi = fxyi * fyij;
                    }
                    if (fyi * fxyi > maxf) {
                        maxf = fyi * fxyi;
                        idx = i;
                    }
                }
                // Output (sample id, predicted class).
                context.write(new Text(vals[0]),
                        new Text(nBConf.classNames.get(idx)));
            }
        }
    }
}
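
Worked through for test sample 1 (attributes 5 6 7): f(cl1) = 3, and each of cl1#p1#5, cl1#p2#6 and cl1#p3#7 occurs once in the training counts, so the score is 3 x 1 x 1 x 1 = 3; cl2, cl3 and cl4 never see p1 = 5 in training, so their products are 0, and the job writes the tab-separated pair "1	cl1". Two caveats about this scoring rule: it compares raw count products f(y) x f(y, x1) x ... x f(y, xd) rather than conditional probabilities (the textbook Naive Bayes rule would divide each joint count by f(y), so the two can disagree when class frequencies differ), and there is no smoothing, so a single attribute value unseen for a class zeroes that class's score outright.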

This concludes our introduction to implementing the Naive Bayes algorithm with MapReduce; I hope the article is helpful to fellow programmers!


