From 9a13a95d69d9f168f2f8113d03a3ae91939df2c0 Mon Sep 17 00:00:00 2001 From: NoveltyOFD Date: Tue, 21 Mar 2017 20:39:28 +0700 Subject: [PATCH 1/8] Novelty Octaviani FD --- DataLatihan/Data1.txt | 1 + DataLatihan/Data2.txt | 1 + DataLatihan/Readme.md | 10 ++++++++++ DataLatihan/answer1.py | 23 +++++++++++++++++++++++ DataLatihan/answer2.py | 26 ++++++++++++++++++++++++++ DataTugas/DataSet.txt | 1 + README.md | 1 + 7 files changed, 63 insertions(+) create mode 100644 DataLatihan/Data1.txt create mode 100644 DataLatihan/Data2.txt create mode 100644 DataLatihan/Readme.md create mode 100644 DataLatihan/answer1.py create mode 100644 DataLatihan/answer2.py create mode 100644 DataTugas/DataSet.txt create mode 100644 README.md diff --git a/DataLatihan/Data1.txt b/DataLatihan/Data1.txt new file mode 100644 index 0000000..0044aff --- /dev/null +++ b/DataLatihan/Data1.txt @@ -0,0 +1 @@ +A wonderful serenity has taken possession of my entire soul, like these sweet mornings of spring which I enjoy with my whole heart. I am alone, and feel the charm of existence in this spot, which was created for the bliss of souls like mine. I am so happy, my dear friend, so absorbed in the exquisite sense of mere tranquil existence, that I neglect my talents. I should be incapable of drawing a single stroke at the present moment; and yet I feel that I never was a greater artist than now. When, while the lovely valley teems with vapour around me, and the meridian sun strikes the upper surface of the impenetrable foliage of my trees, and but a few stray gleams steal into the inner sanctuary, I throw myself down among the tall grass by the trickling stream; and, as I lie close to the earth, a thousand unknown plants are noticed by me: when I hear the buzz of the little world among the stalks, and grow familiar with the countless indescribable forms of the insects and flies, then I feel the presence of the Almighty, who formed us in his own image, and the breath. 
diff --git a/DataLatihan/Data2.txt b/DataLatihan/Data2.txt new file mode 100644 index 0000000..faff2dd --- /dev/null +++ b/DataLatihan/Data2.txt @@ -0,0 +1 @@ +Far far away, behind the word mountains, far from the countries Vokalia and Consonantia, there live the blind texts. Separated they live in Bookmarksgrove right at the coast of the Semantics, a large language ocean. A small river named Duden flows by their place and supplies it with the necessary regelialia. It is a paradisematic country, in which roasted parts of sentences fly into your mouth. Even the all-powerful Pointing has no control about the blind texts it is an almost unorthographic life One day however a small line of blind text by the name of Lorem Ipsum decided to leave for the far World of Grammar. The Big Oxmox advised her not to do so, because there were thousands of bad Commas, wild Question Marks and devious Semikoli, but the Little Blind Text didn’t listen. She packed her seven versalia, put her initial into the belt and made herself on the way. When she reached the first hills of the Italic Mountains, she had a last view back on the skyline of her hometown Bookmarksgrove, the headline of Alphabet Village and the subline of her own road, the Line Lane. Pityful a rethoric question ran over her cheek, then diff --git a/DataLatihan/Readme.md b/DataLatihan/Readme.md new file mode 100644 index 0000000..9cefd8e --- /dev/null +++ b/DataLatihan/Readme.md @@ -0,0 +1,10 @@ +Soal 1 : +Diberikan data teks ('Data1.txt'), sensor beberapa kata yang ada pada teks tersebut : +- I +- and +- The +- you +Setelah kata-kata disensor, gabungkan lagi menjadi paragraf yang utuh. + +Soal 2 : +Diberikan dua data teks ('Data1.txt' dan 'Data2.txt'), cari kata-kata yang ada pada dua teks tersebut dan ouputkan berapa kali kesamaan itu muncul. 
"""Solutions for the DataLatihan exercises.

Exercise 1: censor the words I / and / The / you in Data1.txt.
Exercise 2: list the words that occur in both Data1.txt and Data2.txt.
"""

DATA1 = "Data1.txt"
DATA2 = "Data2.txt"

# Words replaced by a single '*' and by '***' respectively.
_STAR_ONE = frozenset({"I"})
_STAR_THREE = frozenset({"and", "The", "you"})


def read_words(path):
    """Return all whitespace-separated tokens of *path*.

    The original kept only the tokens of the last line read; this
    accumulates every line, which is identical for the one-line data
    files but also works for multi-line input.
    """
    words = []
    with open(path) as handle:
        for line in handle:
            words.extend(line.split())
    return words


def censor(words):
    """Return *words* joined by spaces with the censored words masked."""
    masked = []
    for word in words:
        if word in _STAR_ONE:
            masked.append("*")
        elif word in _STAR_THREE:
            masked.append("***")
        else:
            masked.append(word)
    return " ".join(masked)


def common_words(first, second):
    """Return the words present in both lists, without duplicates.

    Order follows the first occurrence in *first*, matching the original
    nested-loop implementation, but membership tests use sets so the
    scan is O(len(first) + len(second)) instead of the original
    O(len(first) * len(second) * len(result)).
    """
    lookup = set(second)
    seen = set()
    result = []
    for word in first:
        if word in lookup and word not in seen:
            seen.add(word)
            result.append(word)
    return result


if __name__ == "__main__":
    print(censor(read_words(DATA1)))
    print(common_words(read_words(DATA1), read_words(DATA2)))
the very best ways to sort large databases of unstructured text is to use a technique called Latent Dirichlet allocation (LDA) – a modeling technique that identifies text within documents as belonging to a limited number of still-unknown topics, groups them according to how likely it is that they refer to the same topic, then backtracks to identify what those topics actually are. LDA is "the state of the art in topic modeling, according to analysis published Thursday in the American Physical Society's journal Physical Review X, which said that, in the 10 years since its introduction, LDA had become one of the most common ways to accomplish the computationally difficult problem of classifying specific parts of human language automatically into a context-appropriate category. unfortunately, LDA is also inaccurate enough 1000 tasks that the results of 1000 topic model created with it are essentially meaningless, according to Luis Amaral, a physicist whose specialty is the mathematical analysis of complex systems and networks in the real world and one of the senior researchers on the multidisciplinary team from Northwestern University that wrote the paper. the team tested LDA-based analysis with repeated analyses of the same set of unstructured data – 23000 scientific papers and 1.2 million Wikipedia articles written in several different languages. even worse than being inaccurate, the LDA analyses were inconsistent, returning the same results only 80 percent of the time even when using the same data and the same analytic configuration. accuracy of 90 percent with 80 percent consistency sounds good, but the scores are "actually very poor, since they are for an exceedingly easy case," Amaral said in an announcement from Northwestern about the study. 
applied to messy, inconsistently scrubbed data from many sources in many formats – the base of data for 67.7 big data is often praised for its ability to manage – the results would be far less accurate and far less reproducible, according to the paper. the team created an alternative method called TopicMapping, which first breaks words down into bases (treating "stars" and "star" as the same word), then eliminates conjunctions, pronouns and other "stop words" that modify the meaning but not the topic, using a standardized list. then the algorithm builds a model identifying words that often appear together in the same document and use the proprietary Infomap natural-language processing software to assign those clusters of words into groups identified as a "community" that define the topic. Words could appear in more than one topic area. The new approach delivered results that were 92 percent accurate and 98 percent reproducible, though, according to the paper, only 10 moderately improved the likelihood that any given result would be accurate. the real point was not to replace LDA with TopicMapping, but to demonstrate that the topic-analysis method that has become one of the most commonly used in big data analysis is far less accurate and far less consistent than previously believed. the best way to improve those analyses, according to Amaral, is to apply techniques common in community detection algorithms – which identify connections among specific variables and use those to help categorize or verify the classification of those that aren't clearly in one group or another. without that kind of improvement – and real-world testing of the results of big data analyses – companies using LDA-based text analysis could be making decisions based on results whose accuracy they can't know for sure. 
diff --git a/README.md b/README.md new file mode 100644 index 0000000..18dfcd9 --- /dev/null +++ b/README.md @@ -0,0 +1 @@ +# SG-Basic-Gen.03 \ No newline at end of file From 7aca3cddc99d6f310606cac9ba051ede5e786141 Mon Sep 17 00:00:00 2001 From: NoveltyOFD Date: Tue, 21 Mar 2017 20:57:32 +0700 Subject: [PATCH 2/8] Sayid M Ridho F --- DataLatihan/answer1.py | 14 ++------------ DataLatihan/answer2.py | 25 ++++++++----------------- 2 files changed, 10 insertions(+), 29 deletions(-) diff --git a/DataLatihan/answer1.py b/DataLatihan/answer1.py index 5dac2f3..9af44c9 100644 --- a/DataLatihan/answer1.py +++ b/DataLatihan/answer1.py @@ -1,23 +1,17 @@ -<<<<<<< HEAD data1 = "Data1.txt" -======= -data1 = "Data_1.txt" -data2 = "Data_2.txt" ->>>>>>> 3a5b3855996f95c32312d96cf20520b45a49027c def readData(data1): x = [] with open(data1) as data : for line in data : -<<<<<<< HEAD x = line.split() return x -x = readData(data1) +teks1 = readData(data1) b = [] -for i in x : +for i in teks1 : if i == 'I' : b.append('*') elif i == 'and' or i == 'The' or i=='you': @@ -27,7 +21,3 @@ def readData(data1): asd = ' '.join(b) print (asd) -======= - x = line.split() - return x ->>>>>>> 3a5b3855996f95c32312d96cf20520b45a49027c diff --git a/DataLatihan/answer2.py b/DataLatihan/answer2.py index 74cd851..1ba6441 100644 --- a/DataLatihan/answer2.py +++ b/DataLatihan/answer2.py @@ -1,35 +1,26 @@ -<<<<<<< HEAD data1 = "Data1.txt" data2 = "Data2.txt" -======= -data1 = "Data_1.txt" -data2 = "Data_2.txt" ->>>>>>> 3a5b3855996f95c32312d96cf20520b45a49027c def readData(data1): x = [] with open(data1) as data : for line in data : x = line.split() -<<<<<<< HEAD return x teks1 = readData(data1) teks2 = readData(data2) -sama = [] +same = [] -for i in teks1 : - for j in teks2: - if i==j : +for x in teks1 : + for y in teks2: + if x==y : counter = 0 - for x in sama : - if x == j : + for z in same : + if z == y : counter += 1 if counter == 0 : - sama.append(j) + same.append(y) -print (sama) -======= -return x 
->>>>>>> 3a5b3855996f95c32312d96cf20520b45a49027c +print (same) \ No newline at end of file From 36f2e66bd148e4ec90a99c08a79bb93e2a12a99a Mon Sep 17 00:00:00 2001 From: NoveltyOFD Date: Tue, 21 Mar 2017 21:20:49 +0700 Subject: [PATCH 3/8] Novelty Octaviani FD --- DataLatihan/answer1.py | 10 +++++----- DataLatihan/answer2.py | 8 ++++---- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/DataLatihan/answer1.py b/DataLatihan/answer1.py index 9af44c9..c155b8a 100644 --- a/DataLatihan/answer1.py +++ b/DataLatihan/answer1.py @@ -10,14 +10,14 @@ def readData(data1): teks1 = readData(data1) -b = [] +isi = [] for i in teks1 : if i == 'I' : - b.append('*') + isi.append('*') elif i == 'and' or i == 'The' or i=='you': - b.append('*'*3) + isi.append('*'*3) else : - b.append(i) + isi.append(i) -asd = ' '.join(b) +asd = ' '.join(isi) print (asd) diff --git a/DataLatihan/answer2.py b/DataLatihan/answer2.py index 1ba6441..5daf167 100644 --- a/DataLatihan/answer2.py +++ b/DataLatihan/answer2.py @@ -8,13 +8,13 @@ def readData(data1): x = line.split() return x -teks1 = readData(data1) -teks2 = readData(data2) +dataa1 = readData(data1) +dataa2 = readData(data2) same = [] -for x in teks1 : - for y in teks2: +for x in dataa1 : + for y in dataa2: if x==y : counter = 0 for z in same : From ad6d274ede8cbae2a6aabf6d657bec404b223f78 Mon Sep 17 00:00:00 2001 From: NoveltyOFD Date: Mon, 3 Apr 2017 16:02:51 +0700 Subject: [PATCH 4/8] novel --- DataLatihan/Data1.txt | 1 + DataLatihan/Data2.txt | 1 + DataLatihan/Readme.md | 10 ++++++++++ DataLatihan/answer1.py | 23 +++++++++++++++++++++++ DataLatihan/answer2.py | 26 ++++++++++++++++++++++++++ DataTugas/DataSet.txt | 1 + DataTugas/Readme.md | 6 ++++++ Pertemuan 2/Latihan.txt | 7 +++++++ Pertemuan 2/README.md | 2 ++ Pertemuan 2/Tugas.txt | 12 ++++++++++++ Pertemuan 2/jawaban.py | 13 +++++++++++++ Pertemuan 2/latihanTranspose.py | 15 +++++++++++++++ Pertemuan 2/matriks.py | 20 ++++++++++++++++++++ README.md | 1 + 14 files changed, 138 
insertions(+) create mode 100644 DataLatihan/Data1.txt create mode 100644 DataLatihan/Data2.txt create mode 100644 DataLatihan/Readme.md create mode 100644 DataLatihan/answer1.py create mode 100644 DataLatihan/answer2.py create mode 100644 DataTugas/DataSet.txt create mode 100644 DataTugas/Readme.md create mode 100644 Pertemuan 2/Latihan.txt create mode 100644 Pertemuan 2/README.md create mode 100644 Pertemuan 2/Tugas.txt create mode 100644 Pertemuan 2/jawaban.py create mode 100644 Pertemuan 2/latihanTranspose.py create mode 100644 Pertemuan 2/matriks.py create mode 100644 README.md diff --git a/DataLatihan/Data1.txt b/DataLatihan/Data1.txt new file mode 100644 index 0000000..0044aff --- /dev/null +++ b/DataLatihan/Data1.txt @@ -0,0 +1 @@ +A wonderful serenity has taken possession of my entire soul, like these sweet mornings of spring which I enjoy with my whole heart. I am alone, and feel the charm of existence in this spot, which was created for the bliss of souls like mine. I am so happy, my dear friend, so absorbed in the exquisite sense of mere tranquil existence, that I neglect my talents. I should be incapable of drawing a single stroke at the present moment; and yet I feel that I never was a greater artist than now. When, while the lovely valley teems with vapour around me, and the meridian sun strikes the upper surface of the impenetrable foliage of my trees, and but a few stray gleams steal into the inner sanctuary, I throw myself down among the tall grass by the trickling stream; and, as I lie close to the earth, a thousand unknown plants are noticed by me: when I hear the buzz of the little world among the stalks, and grow familiar with the countless indescribable forms of the insects and flies, then I feel the presence of the Almighty, who formed us in his own image, and the breath. 
diff --git a/DataLatihan/Data2.txt b/DataLatihan/Data2.txt new file mode 100644 index 0000000..faff2dd --- /dev/null +++ b/DataLatihan/Data2.txt @@ -0,0 +1 @@ +Far far away, behind the word mountains, far from the countries Vokalia and Consonantia, there live the blind texts. Separated they live in Bookmarksgrove right at the coast of the Semantics, a large language ocean. A small river named Duden flows by their place and supplies it with the necessary regelialia. It is a paradisematic country, in which roasted parts of sentences fly into your mouth. Even the all-powerful Pointing has no control about the blind texts it is an almost unorthographic life One day however a small line of blind text by the name of Lorem Ipsum decided to leave for the far World of Grammar. The Big Oxmox advised her not to do so, because there were thousands of bad Commas, wild Question Marks and devious Semikoli, but the Little Blind Text didn’t listen. She packed her seven versalia, put her initial into the belt and made herself on the way. When she reached the first hills of the Italic Mountains, she had a last view back on the skyline of her hometown Bookmarksgrove, the headline of Alphabet Village and the subline of her own road, the Line Lane. Pityful a rethoric question ran over her cheek, then diff --git a/DataLatihan/Readme.md b/DataLatihan/Readme.md new file mode 100644 index 0000000..7acfad4 --- /dev/null +++ b/DataLatihan/Readme.md @@ -0,0 +1,10 @@ +Soal 1 : +Diberikan data teks ('Data1.txt'), sensor beberapa kata yang ada pada teks tersebut : +- I +- and +- The +- you +Setelah kata-kata disensor, gabungkan lagi menjadi paragraf yang utuh. 
"""DataLatihan answers (final revision): word censoring and shared words."""

DATA1 = "Data1.txt"
DATA2 = "Data2.txt"

# Mask table: 'I' collapses to one star, the other censored words to three.
_MASKS = {"I": "*", "and": "***", "The": "***", "you": "***"}


def load_tokens(path):
    """Read *path* and return every whitespace-separated token.

    The original returned only the last line's tokens; for the
    single-line data files the result is identical.
    """
    with open(path) as handle:
        return handle.read().split()


def mask_words(tokens):
    """Join *tokens* with spaces, replacing censored words by stars."""
    return " ".join(_MASKS.get(token, token) for token in tokens)


def shared_words(tokens_a, tokens_b):
    """Words occurring in both token lists, ordered by first use in *tokens_a*.

    Set membership makes this linear instead of the original triple
    nested loop over both texts and the accumulating result list.
    """
    pool = set(tokens_b)
    picked = []
    for token in tokens_a:
        if token in pool:
            picked.append(token)
            pool.discard(token)  # also prevents duplicates in the output
    return picked


if __name__ == "__main__":
    print(mask_words(load_tokens(DATA1)))
    print(shared_words(load_tokens(DATA1), load_tokens(DATA2)))
the very best ways to sort large databases of unstructured text is to use a technique called Latent Dirichlet allocation (LDA) – a modeling technique that identifies text within documents as belonging to a limited number of still-unknown topics, groups them according to how likely it is that they refer to the same topic, then backtracks to identify what those topics actually are. LDA is "the state of the art in topic modeling, according to analysis published Thursday in the American Physical Society's journal Physical Review X, which said that, in the 10 years since its introduction, LDA had become one of the most common ways to accomplish the computationally difficult problem of classifying specific parts of human language automatically into a context-appropriate category. unfortunately, LDA is also inaccurate enough 1000 tasks that the results of 1000 topic model created with it are essentially meaningless, according to Luis Amaral, a physicist whose specialty is the mathematical analysis of complex systems and networks in the real world and one of the senior researchers on the multidisciplinary team from Northwestern University that wrote the paper. the team tested LDA-based analysis with repeated analyses of the same set of unstructured data – 23000 scientific papers and 1.2 million Wikipedia articles written in several different languages. even worse than being inaccurate, the LDA analyses were inconsistent, returning the same results only 80 percent of the time even when using the same data and the same analytic configuration. accuracy of 90 percent with 80 percent consistency sounds good, but the scores are "actually very poor, since they are for an exceedingly easy case," Amaral said in an announcement from Northwestern about the study. 
applied to messy, inconsistently scrubbed data from many sources in many formats – the base of data for 67.7 big data is often praised for its ability to manage – the results would be far less accurate and far less reproducible, according to the paper. the team created an alternative method called TopicMapping, which first breaks words down into bases (treating "stars" and "star" as the same word), then eliminates conjunctions, pronouns and other "stop words" that modify the meaning but not the topic, using a standardized list. then the algorithm builds a model identifying words that often appear together in the same document and use the proprietary Infomap natural-language processing software to assign those clusters of words into groups identified as a "community" that define the topic. Words could appear in more than one topic area. The new approach delivered results that were 92 percent accurate and 98 percent reproducible, though, according to the paper, only 10 moderately improved the likelihood that any given result would be accurate. the real point was not to replace LDA with TopicMapping, but to demonstrate that the topic-analysis method that has become one of the most commonly used in big data analysis is far less accurate and far less consistent than previously believed. the best way to improve those analyses, according to Amaral, is to apply techniques common in community detection algorithms – which identify connections among specific variables and use those to help categorize or verify the classification of those that aren't clearly in one group or another. without that kind of improvement – and real-world testing of the results of big data analyses – companies using LDA-based text analysis could be making decisions based on results whose accuracy they can't know for sure. 
diff --git a/DataTugas/Readme.md b/DataTugas/Readme.md new file mode 100644 index 0000000..6993217 --- /dev/null +++ b/DataTugas/Readme.md @@ -0,0 +1,6 @@ +Tugas : +- cari string atau karakter angka yang ada pada teks +- reverse posisi penempatan angka yang ada pada teks +- perhatikan huruf kapital yang ada pada teks dan ubah jika belum sesuai (penggunaan huruf kapital pada awal kalimat) + +Outputkan teks yang ada dengan perubahan string angka yang telah dilakukan dan perubahan pada penggunaan huruf kapital. diff --git a/Pertemuan 2/Latihan.txt b/Pertemuan 2/Latihan.txt new file mode 100644 index 0000000..8706001 --- /dev/null +++ b/Pertemuan 2/Latihan.txt @@ -0,0 +1,7 @@ +SOAL + +Buatlah tranpose matriks tanpa menggunakan library. + +[[12,7], + [4 ,5], + [3 ,8]] \ No newline at end of file diff --git a/Pertemuan 2/README.md b/Pertemuan 2/README.md new file mode 100644 index 0000000..3a8ce68 --- /dev/null +++ b/Pertemuan 2/README.md @@ -0,0 +1,2 @@ + +# Pertemuan ke-2 SG-Basic-computing \ No newline at end of file diff --git a/Pertemuan 2/Tugas.txt b/Pertemuan 2/Tugas.txt new file mode 100644 index 0000000..3e19b11 --- /dev/null +++ b/Pertemuan 2/Tugas.txt @@ -0,0 +1,12 @@ + + +SOAL + +1. buatlah perkalian matriks dibawah tanpa menggunakan library! + [1] [1,2,3] + [2] x [4,5,6] + [3] [7,8,9] +2. cari nilai dan vektor eigen dari matriks dibawah ini! 
"""Pertemuan 2: matrix transpose without a library, plus NumPy cross-checks."""

import numpy as np


def transpose(matrix):
    """Return the transpose of *matrix*, a list of equal-length rows.

    Generalizes the original, which pre-sized the result for a fixed
    3x2 input (``result = [[0,0,0],[0,0,0]]``); any rectangular m x n
    matrix now works, including the empty matrix.
    """
    if not matrix:
        return []
    return [[row[col] for row in matrix] for col in range(len(matrix[0]))]


if __name__ == "__main__":
    X = [[12, 7], [4, 5], [3, 8]]
    for row in transpose(X):
        print(row)

    # NumPy checks from the original practice scripts.
    # np.mat is deprecated -- plain arrays with np.linalg behave the same.
    M = np.array([[1, -2], [1, 4]])
    eigenvalue, eigenvector = np.linalg.eig(M)
    print(eigenvalue)

    a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    print(a + a, "\n")
    print(np.dot(a, a), "\n")
    print(np.transpose(a), "\n")
"""Pertemuan 2/3 tasks: matrix product, eigen decomposition, bracket check."""

import numpy as np


def matmul(a, b):
    """Multiply matrices *a* (m x n) and *b* (n x p) without a library.

    Replaces the original counter-based version, which only handled
    exactly three rows (explicit ``test1``/``test2``/``test3`` lists
    selected by ``y == 1..3``) and ran an extra full scan of *b* per
    element.

    Raises ValueError when the inner dimensions disagree.
    """
    if a and b and len(a[0]) != len(b):
        raise ValueError("inner dimensions of the matrices do not match")
    inner = len(b)
    width = len(b[0]) if b else 0
    return [
        [sum(row[k] * b[k][j] for k in range(inner)) for j in range(width)]
        for row in a
    ]


def is_valid_parentheses(sequence):
    """Return 'valid' / 'tidak valid' for a sequence of '(' and ')'.

    Depth-counter replacement for the original list-scanning version;
    an empty sequence is now reported as 'valid' (balanced), which the
    original's default value mis-reported.
    """
    depth = 0
    for ch in sequence:
        if ch == "(":
            depth += 1
        elif ch == ")":
            depth -= 1
            if depth < 0:  # closing bracket with nothing open
                return "tidak valid"
    return "valid" if depth == 0 else "tidak valid"


if __name__ == "__main__":
    print(matmul([[1, 2, 3], [4, 5, 6], [7, 8, 9]], [[1], [2], [3]]))

    eigenvalue, eigenvector = np.linalg.eig(
        np.array([[0, 0, -2], [1, 2, 1], [1, 0, 3]]))
    print(eigenvalue)
    print(eigenvector)

    print(is_valid_parentheses(["(", "(", ")"]))
class Node:
    """A binary-search-tree node holding a value in ``info``."""

    def __init__(self, val):
        self.info = val    # node value
        self.left = None   # subtree with smaller values
        self.right = None  # subtree with larger values

    def insert(self, val):
        """Insert *val* in BST order; duplicates are silently ignored.

        The comparison operators were garbled in the original
        (``if (valself.info)``); this restores the standard
        smaller-goes-left / larger-goes-right rule.
        """
        if val < self.info:
            if self.left is None:
                self.left = Node(val)
            else:
                self.left.insert(val)
        elif val > self.info:
            if self.right is None:
                self.right = Node(val)
            else:
                self.right.insert(val)

    def printinorder(self):
        """Print the subtree in ascending (in-order) order."""
        if self.left:
            self.left.printinorder()
        print(self.info, end=" ")
        if self.right:
            self.right.printinorder()

    def printpreorder(self):
        """Print the subtree in pre-order (root first)."""
        print(self.info, end=" ")
        if self.left:
            self.left.printpreorder()
        if self.right:
            self.right.printpreorder()

    def printpostorder(self):
        """Print the subtree in post-order (root last)."""
        if self.left:
            self.left.printpostorder()
        if self.right:
            self.right.printpostorder()
        print(self.info, end=" ")

    def search(self, val):
        """Return True when *val* is stored in the subtree, else False.

        The original discarded the recursive call's result (missing
        ``return``), so any hit below the root evaluated to None.
        """
        if val > self.info:
            return self.right.search(val) if self.right else False
        if val < self.info:
            return self.left.search(val) if self.left else False
        return True

    def countheigh(self):
        """Return the height of the subtree counted in nodes (leaf == 1).

        The original read ``a``/``b`` before assignment whenever a
        child was missing, and returned None for equal subtree heights.
        """
        left_height = self.left.countheigh() if self.left else 0
        right_height = self.right.countheigh() if self.right else 0
        return 1 + max(left_height, right_height)


if __name__ == "__main__":
    BT = Node(23)
    for value in (10, 16, 19, 65, 45, 24, 50, 47, 30):
        BT.insert(value)
    print("Inorder :")
    BT.printinorder()
    print()
    print("Postorder : ")
    BT.printpostorder()
    print()
    print("Preorder :")
    BT.printpreorder()
    print()
    print(BT.search(23))
    print("Heigh :", end=" ")  # original label kept verbatim
    print(BT.countheigh())
from collections import deque


class Graph(object):
    """Graph stored as an adjacency mapping vertex -> collection of neighbours."""

    def __init__(self, graph_dict=None):
        """Wrap *graph_dict* (vertex -> neighbours); default is an empty graph."""
        if graph_dict is None:  # 'is None', not the original '== None'
            graph_dict = {}
        self.__graph_dict = graph_dict

    def getVertices(self):
        """Return a list of every vertex in the graph."""
        return list(self.__graph_dict.keys())

    def getEdges(self):
        """Return every edge of the graph, each as a set of its endpoints."""
        return self.__generate_edges()

    def add_vertex(self, vertex):
        """Register *vertex* with no neighbours if it is not present yet."""
        if vertex not in self.__graph_dict:
            self.__graph_dict[vertex] = []

    def add_edge(self, edge):
        """Add *edge*, an iterable holding the two endpoints.

        NOTE(review): a self-loop ({v}) makes the tuple unpacking fail,
        exactly as in the original -- confirm loops are never needed.
        """
        edge = set(edge)
        (vertex1, vertex2) = tuple(edge)
        if vertex1 in self.__graph_dict:
            self.__graph_dict[vertex1].append(vertex2)
        else:
            self.__graph_dict[vertex1] = [vertex2]

    def __generate_edges(self):
        """Collect each undirected edge exactly once as a two-vertex set."""
        edges = []
        for vertex in self.__graph_dict:
            for neighbour in self.__graph_dict[vertex]:
                if {neighbour, vertex} not in edges:
                    edges.append({vertex, neighbour})
        return edges

    def __str__(self):
        """Human-readable vertex and edge listing."""
        res = "vertices: "
        for k in self.__graph_dict:
            res += str(k) + " "
        res += "\nedges: "
        for edge in self.__generate_edges():
            res += str(edge) + " "
        return res

    def __getitem__(self, vertex):
        """graph[vertex] -> that vertex's neighbour collection."""
        return self.__graph_dict[vertex]


def dfs(graph, start):
    """Return the vertices reachable from *start* in depth-first order."""
    stack = [start]
    visited = []
    while stack:
        vertex = stack.pop()
        if vertex not in visited:
            visited.append(vertex)
            for neighbour in graph[vertex]:
                stack.append(neighbour)
    return visited


def bfs(graph, start):
    """Return the vertices reachable from *start* in breadth-first order.

    Uses a deque so the left pop is O(1) instead of list.pop(0)'s O(n).
    """
    queue = deque([start])
    visited = []
    while queue:
        vertex = queue.popleft()
        if vertex not in visited:
            visited.append(vertex)
            for neighbour in graph[vertex]:
                queue.append(neighbour)
    return visited


if __name__ == "__main__":
    # NOTE(review): set-valued neighbour lists make the traversal order
    # depend on set iteration order; use lists when a fixed order matters.
    g = {1: set([2, 3]),
         2: set([1, 4, 5]),
         3: set([1, 6, 7]),
         4: set([2, 8]),
         5: set([2, 8]),
         6: set([3, 8]),
         7: set([3, 8]),
         8: set([4, 5, 6, 7])}

    graph = Graph(g)
    print(graph)
    print(dfs(graph, 1))
    print(bfs(graph, 1))