Strict Standards: Declaration of Doku_Renderer_metadata::table_open() should be compatible with Doku_Renderer::table_open($maxcols = NULL, $numrows = NULL, $pos = NULL) in /home2/cp-wiki/htdocs/inc/parser/metadata.php on line 24

Strict Standards: Declaration of Doku_Renderer_metadata::table_close() should be compatible with Doku_Renderer::table_close($pos = NULL) in /home2/cp-wiki/htdocs/inc/parser/metadata.php on line 24

Warning: Cannot modify header information - headers already sent by (output started at /home2/cp-wiki/htdocs/inc/parser/metadata.php:24) in /home2/cp-wiki/htdocs/inc/actions.php on line 581

Warning: Cannot modify header information - headers already sent by (output started at /home2/cp-wiki/htdocs/inc/parser/metadata.php:24) in /home2/cp-wiki/htdocs/inc/actions.php on line 581
cs190c:lab10

Lab 10: First Hour

A solution to the dictionary problem:

# spell_check.py
#     Program to spell_check words in a text file.
#     Further illustrates Python dictionaries
 
import string
 
def spell_check():
    """Spell-check a text file against the word list in dictionary.txt.

    Loads dictionary.txt, prompts the user for a file to analyze, and
    prints (in sorted order, one per line) every word in that file that
    does not appear in the dictionary.

    Reads:  dictionary.txt (word list), plus the user-named input file.
    Writes: results to standard output.  Returns None.
    """
    # Punctuation that is turned into whitespace so split() yields bare
    # words.  Digits are additionally stripped from the analyzed file
    # (but not from the dictionary), matching the original behavior.
    punctuation = '!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~'

    # Load the dictionary.  `with` guarantees the file handle is closed
    # (the original leaked it).
    with open('dictionary.txt', 'r') as f:
        text = f.read().lower()
    for ch in punctuation:
        text = text.replace(ch, ' ')
    # A set gives O(1) membership tests; the original simulated one with
    # a dict mapping every word to True.
    dictionary = set(text.split())
    print("Dictionary loaded.")

    # Get the sequence of words from the file to analyze.
    fname = raw_input("File to analyze: ")
    with open(fname, 'r') as f:
        text = f.read().lower()
    for ch in punctuation + '1234567890':
        text = text.replace(ch, ' ')
    words = text.split()

    # Collect the words not found in the dictionary; the set removes
    # duplicates, just as the original orphans dict did.
    orphans = set(w for w in words if w not in dictionary)

    # Output the unknown words in sorted order.  sorted() replaces the
    # original items.sort(cmp), whose explicit cmp argument was just the
    # default comparison.
    for w in sorted(orphans):
        print(w)

if __name__ == '__main__':
    spell_check()

Second Hour

from networkx import *
import pylab as P
 
G = DiGraph()
G.add_edges_from([tuple(s.split()) for s in open('ebi.txt')])
 
print 'Number of nodes:', G.order()
print
 
C1 = component.strongly_connected_components(G)
 
print 'Number of strongly connected components:', len(C1)
for t in C1:
        if len(t) > 1:
            print "a strongly connected component of size > 1: ", len(t)
print
 
Gscc = DiGraph()
Gscc = subgraph(G, C1[0])
print "number of nodes in Gscc: ", Gscc.order()
 
P.figure(1)
draw_circular(Gscc)
P.show()