FileNotFoundError when importing a dataset in Python

I want to import the dataset but it fails with a FileNotFoundError whose path contains two \ characters; I do not know how they were generated.
import pickle
import numpy as np
import os

def load_cifar_batch(filename):
    with open(filename, 'rb') as f:
        datadict = pickle.load(f, encoding='bytes')
        x = datadict[b'data']
        y = datadict[b'labels']
        x = x.reshape(10000, 3, 32, 32).transpose(0, 2, 3, 1).astype('float')
        y = np.array(y)
        return x, y

def load_cifar10(root):
    xs = []
    ys = []
    for b in range(1, 6):
        f = os.path.join(root, 'data_batch_%d' % (b,))
        x, y = load_cifar_batch(f)
        xs.append(x)
        ys.append(y)
    Xtrain = np.concatenate(xs)  #1
    Ytrain = np.concatenate(ys)
    del x, y
    Xtest, Ytest = load_cifar_batch(os.path.join(root, 'test_batch'))  #2
    return Xtrain, Ytrain, Xtest, Ytest
and then I try to run
import numpy as np
from data_utils import load_cifar10
import matplotlib.pyplot as plt
datadir='E:\python\waa\cifar10\cifar-10-batches-bin'
x_train,y_train,x_test,y_test=load_cifar10('datadir')
the error is:
FileNotFoundError: [Errno 2] No such file or directory: 'datadir\\data_batch_1'
There are two \ in my error; how do I fix it?
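The doubled backslash is just how Python displays a single backslash inside a string, so it is not the real problem. The likely bug is that the call passes the string literal 'datadir' instead of the variable datadir, so Python looks for a directory literally named datadir. A minimal sketch of the fix (assuming the directory layout from the question; the raw-string prefix keeps the backslashes in the Windows path from being read as escapes):

import numpy as np
from data_utils import load_cifar10

datadir = r'E:\python\waa\cifar10\cifar-10-batches-bin'   # raw string for the Windows path
x_train, y_train, x_test, y_test = load_cifar10(datadir)  # pass the variable, not 'datadir'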

Related

How to use specified values in each axis of an sns heat map?

Basically, I need to create a heatmap using seaborn with a set of variables in the y axis, and another set in the x axis. Currently I can only get it to show all variables in both axes, but because I have so many it is very difficult to read.
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
df = pd.read_csv('everything.csv')
a = df.drop(columns = ['1', '2', '3', '4'])
b = df.drop(columns = ['5', '6', '7'])
corr = df.corr(method = 'spearman')
sns.heatmap(corr, annot = True)
plt.show()
This is essentially what I am working with. I want a to be plotted on one axis of the heatmap, and b on the other. I'm sure it's an easy fix for someone more experienced than me.
You can calculate the complete correlation matrix, and then subset some rows and columns:
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
sns.set_style('white')
iris = sns.load_dataset('iris')
cols_A = ['sepal_length', 'sepal_width']
cols_B = ['petal_length', 'petal_width']
corr = iris[cols_A + cols_B].corr()
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(12, 5))
sns.heatmap(corr, vmin=-1, vmax=1, cmap='RdYlGn', annot=True, ax=ax1)
sns.heatmap(corr.loc[cols_A, cols_B], vmin=-1, vmax=1, cmap='RdYlGn', annot=True, ax=ax2)
plt.tight_layout()
plt.show()
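Applied to the setup in the question (the file name everything.csv and the column names '1' through '7' are taken from the snippet above, so treat them as placeholders), the same subsetting would look roughly like this:

import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

df = pd.read_csv('everything.csv')
rows = ['1', '2', '3', '4']        # variables for the y axis
cols = ['5', '6', '7']             # variables for the x axis
corr = df.corr(method='spearman')  # full correlation matrix first
sns.heatmap(corr.loc[rows, cols], annot=True)  # then subset rows and columns
plt.show()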

How to make Matplotlib saved gif looping

Environment:
Matplotlib v2.2.2
Code:
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.animation import PillowWriter
fig = plt.figure()

def f(x, y):
    return np.sin(x) + np.cos(y)

x = np.linspace(0, 2 * np.pi, 120)
y = np.linspace(0, 2 * np.pi, 100).reshape(-1, 1)

# ims is a list of lists, each row is a list of artists to draw in the
# current frame; here we are just animating one artist, the image, in
# each frame
ims = []
for i in range(20):
    x += np.pi / 15.
    y += np.pi / 20.
    im = plt.imshow(f(x, y))
    ims.append([im])

ani = animation.ArtistAnimation(fig, ims, interval=50, blit=True,
                                repeat_delay=500)
writer = PillowWriter(fps=20)
ani.save("demo2.gif", writer=writer)
plt.show()
Output:
It only plays once.
Using imagemagick as a writer produces a looping gif, but I can't tell you why that does not work with PillowWriter:
ani.save("demo2.gif", writer='imagemagick')
I found a workaround answer here. You can do the following:
from matplotlib.animation import PillowWriter

class LoopingPillowWriter(PillowWriter):
    def finish(self):
        self._frames[0].save(
            self._outfile, save_all=True, append_images=self._frames[1:],
            duration=int(1000 / self.fps), loop=0)

ani = animation.ArtistAnimation(fig, ims, interval=50, blit=True,
                                repeat_delay=500)
ani.save('demo2.gif', writer=LoopingPillowWriter(fps=20))
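The key is the loop=0 passed to PIL's Image.save, which writes the GIF extension telling viewers to repeat forever; the stock PillowWriter in Matplotlib 2.2 does not set it. A quick check of the saved file (assuming Pillow is installed and demo2.gif was just written):

from PIL import Image

im = Image.open('demo2.gif')
print(im.info.get('loop'))  # 0 means loop forever; None means play once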

Error when Importing keras in embedded python in C

I'm trying to embed Python in my C application. I downloaded the package from the official Python website and managed to run a simple Hello World.
Now I want to go deeper and use some Python libraries like numpy, keras, tensorflow...
I'm working with Python 3.5.4; I installed all the needed packages on my PC with pip3:
pip3 install keras
pip3 install tensorflow
...
Then I created my script and launched it in a Python environment, where it works fine:
Python:
# Importing the libraries
# 1.
import numpy as np
import pandas as pd
dataset2 = pd.read_csv('I:\RNA\dataset19.csv')
X_test = dataset2.iloc[:, 0:228].values
y_test = dataset2.iloc[:, 228].values
# 2.
import pickle
sc = pickle.load(open('I:\RNA\isVerb_sc', 'rb'))
X_test = sc.transform(X_test)
# 3.
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
classifier = Sequential()
classifier.add(Dense(units = 114, kernel_initializer = 'uniform', activation = 'relu', input_dim = 228))
classifier.add(Dropout(p = 0.3))
classifier.add(Dense(units = 114, kernel_initializer = 'uniform', activation = 'relu'))
classifier.add(Dropout(p = 0.3))
classifier.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
classifier.load_weights('I:\RNA\isVerb_weights.h5')
y_pred = classifier.predict(X_test)
y_pred1 = (y_pred > 0.5)
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred1)
But when I execute the same script in a C environment with embedded Python, it doesn't work.
At first I executed my script directly with PyRun_SimpleFile with no luck, so I sliced it into multiple instructions with PyRun_SimpleString to pinpoint the problem:
C:
result = PyRun_SimpleString("import numpy as np"); // result = 0 (ok)
result = PyRun_SimpleString("import pandas as pd"); // result = 0 (ok)
...
result = PyRun_SimpleString("import pickle"); // result = 0 (ok)
... (all instructions above work)
result = PyRun_SimpleString("import keras"); // result = -1 !!
... (everything below this fails)
but there is not a single stack trace for this error; I tried this but I just got:
"Here's the output: (null)"
My initialization of Python in C seems correct, since other libraries import fine:
// Python
wchar_t *stdProgramName = L"I:\\LIBs\\cpython354";
Py_SetProgramName(stdProgramName);
wchar_t *stdPythonHome = L"I:\\LIBs\\cpython354";
Py_SetPythonHome(stdPythonHome);
wchar_t *stdlib = L"I:\\LIBs\\cpython354;I:\\LIBs\\cpython354\\Lib\\python35.zip;I:\\LIBs\\cpython354\\Lib;I:\\LIBs\\cpython354\\DLLs;I:\\LIBs\\cpython354\\Lib\\site-packages";
Py_SetPath(stdlib);
// Initialize Python
Py_Initialize();
When inside a Python console, the line import keras takes some time (3 s) but works (with a warning, but I found no harm in it):
>>> import keras
I:\LIBs\cpython354\lib\site-packages\h5py\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
from ._conv import register_converters as _register_converters
Using TensorFlow backend.
>>>
I'm at a loss now; I don't know where to look, since there is no stack trace.
It seems that when you import keras, it executes this line:
sys.stderr.write('Using TensorFlow backend.\n')
but sys.stderr is not defined in Python embedded on Windows.
A simple correction is to define sys.stderr, for example:
import sys

class CatchOutErr:
    def __init__(self):
        self.value = ''
    def write(self, txt):
        self.value += txt

catchOutErr = CatchOutErr()
sys.stderr = catchOutErr
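With that in place (executed before the failing import, e.g. fed to the embedded interpreter through PyRun_SimpleString), import keras can write its banner without crashing, and the captured text can be read back afterwards. A minimal usage sketch, assuming the snippet above has already run:

import keras              # the sys.stderr.write call now succeeds
print(catchOutErr.value)  # e.g. 'Using TensorFlow backend.'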

Functor misses implicit value for parameter instance (only after sbt clean)

I did some experiments with Kittens (https://github.com/milessabin/kittens) and have issues compiling my code. I receive the following error:
[error] ...danirey\scala\kittens\Kittens.scala:23: could not find implicit value for parameter instance: cats.Functor[danirey.scala.kittens.AdtDefns.Tree]
[error] val funct = Functor[Tree]
[error] ^
[error] one error found
[error] (compile:compileIncremental) Compilation failed
The complete file is as follows:
package danirey.scala.kittens

/**
 * @author Dani
 */
import cats.Functor
import cats.syntax.AllSyntax
import cats.derived.functor._
import legacy._
import cats.derived.iterable.legacy._
import org.typelevel.discipline.scalatest.Discipline
import shapeless.cachedImplicit

object Kittens extends App {
  val ft = new FunctorExperiment()
  ft.print()
}

class FunctorExperiment extends AllSyntax {
  import AdtDefns._

  def print(): Unit = {
    val funct = Functor[Tree]
    val tree: Tree[String] = Node(
      Leaf("Reto"),
      Node(
        Leaf("Sandra"),
        Leaf("Mike")
      )
    )
    println(funct.map(tree)(_.length))
  }
}
I have used almost identical code in a ScalaTest, which compiles without any issues.
package danirey.scala.kittens

import cats.Functor
import cats.syntax.AllSyntax
import cats.derived.functor._
import legacy._
import cats.derived.iterable.legacy._
import org.scalatest.FunSuite
import org.typelevel.discipline.scalatest.Discipline
import shapeless.cachedImplicit

/**
 * @author Dani
 */
class FunctorExperimentTest extends FunSuite with Discipline with AllSyntax {
  import AdtDefns._

  test("functors experiment") {
    val funct = Functor[Tree]
    val tree: Tree[String] = Node(
      Leaf("Reto"),
      Node(
        Leaf("Sandra"),
        Leaf("Mike")
      )
    )
    println(funct.map(tree)(_.length))
  }
}
My build.sbt looks as follows:
name := "shapeless-experiments"

version := "1.0-SNAPSHOT"

scalaVersion := "2.11.8"

exportJars := true

libraryDependencies ++= Seq(
  "com.chuusai" % "shapeless_2.11" % "2.3.0",
  "org.typelevel" % "kittens_2.11" % "1.0.0-M2",
  "org.scalatest" %% "scalatest" % "3.0.0-M7" % "test"
)

scalacOptions ++= Seq(
  "-feature",
  "-language:higherKinds",
  "-language:implicitConversions",
  "-unchecked"
)
The most interesting thing is that it compiles as part of an incremental compile.
If I comment out lines 16, 23, and 32, then execute "sbt compile",
then remove the comments again and execute "sbt compile/package", it compiles and I can even execute the program. But as soon as I run "sbt clean", it will not compile anymore.
The AdtDefns object is basically a copy of https://github.com/milessabin/kittens/blob/master/core/src/test/scala/cats/derived/adtdefns.scala
The relevant part is:
object AdtDefns {
  sealed trait Tree[T]
  final case class Leaf[T](t: T) extends Tree[T]
  final case class Node[T](l: Tree[T], r: Tree[T]) extends Tree[T]
}
PS: Would be nice if someone could create a tag for scala-kittens
@DaniRey we use kittens in our projects, but only the sequence part. I am not aware of any project that uses kittens derivation. What's your use case?

Try statement in Cython for cimport (for use with mpi4py)

Is there a way to have the equivalent of the Python try statement in Cython for a cimport?
Something like this:
try:
    cimport something
except ImportError:
    pass
I would need this to write a Cython extension that can be compiled with or without mpi4py. This is very standard in compiled languages where the mpi commands can be put between #ifdef and #endif preprocessor directives. How can we obtain the same result in Cython?
I tried this but it does not work:
try:
    from mpi4py import MPI
    from mpi4py cimport MPI
    from mpi4py.mpi_c cimport *
except ImportError:
    rank = 0
    nb_proc = 1

# solve an incompatibility between openmpi and mpi4py versions
cdef extern from 'mpi-compat.h': pass

does_it_work = 'Not yet'
Actually it works well if mpi4py is correctly installed, but if import mpi4py raises an ImportError, the Cython file does not compile and I get the error:
Error compiling Cython file:
------------------------------------------------------------
...
try:
from mpi4py import MPI
from mpi4py cimport MPI
^
------------------------------------------------------------
mod.pyx:4:4: 'mpi4py.pxd' not found
The file setup.py:
from setuptools import setup, Extension
from Cython.Distutils import build_ext
import os

here = os.path.abspath(os.path.dirname(__file__))
include_dirs = [here]

try:
    import mpi4py
except ImportError:
    pass
else:
    INCLUDE_MPI = '/usr/lib/openmpi/include'
    include_dirs.extend([
        INCLUDE_MPI,
        mpi4py.get_include()])

name = 'mod'
ext = Extension(
    name,
    include_dirs=include_dirs,
    sources=['mod.pyx'])

setup(name=name,
      cmdclass={"build_ext": build_ext},
      ext_modules=[ext])
Using a try/except block in this way is something you won't be able to do.
The extension module you are making must be statically compiled and linked against the things it uses cimport to load at the C level. A try/except block is executed when the module is imported, not when it is compiled.
On the other hand, in theory, you should be able to get the effect you're looking for using Cython's support for conditional compilation.
In your setup.py file you can check whether the needed modules are present and then define compile-time environment variables for the Cython compiler that depend on the result.
There's an example of how to do this in one of Cython's tests.
There they pass a dictionary containing the desired environment variables to the constructor of Cython's Extension class as the keyword argument pyrex_compile_time_env (which has since been renamed to cython_compile_time_env; for Cython.Build.Dependencies.cythonize it is called compile_time_env).
Thank you for your very useful answer @IanH. I include an example to show what it gives.
The file setup.py:
from setuptools import setup
from Cython.Distutils.extension import Extension
from Cython.Distutils import build_ext
import os

here = os.path.abspath(os.path.dirname(__file__))

import numpy as np
include_dirs = [here, np.get_include()]

try:
    import mpi4py
except ImportError:
    MPI4PY = False
else:
    MPI4PY = True
    INCLUDE_MPI = '/usr/lib/openmpi/include'
    include_dirs.extend([
        INCLUDE_MPI,
        mpi4py.get_include()])

name = 'mod'
ext = Extension(
    name,
    include_dirs=include_dirs,
    cython_compile_time_env={'MPI4PY': MPI4PY},
    sources=['mod.pyx'])

setup(name=name,
      cmdclass={"build_ext": build_ext},
      ext_modules=[ext])

if not MPI4PY:
    print('Warning: since importing mpi4py raises an ImportError,\n'
          '         the extensions are compiled without mpi and\n'
          '         will work only in sequential mode.')
And the file mod.pyx, with a few real MPI commands:
import numpy as np
cimport numpy as np

try:
    from mpi4py import MPI
except ImportError:
    nb_proc = 1
    rank = 0
else:
    comm = MPI.COMM_WORLD
    nb_proc = comm.size
    rank = comm.Get_rank()

IF MPI4PY:
    from mpi4py cimport MPI
    from mpi4py.mpi_c cimport *
    # solve an incompatibility between openmpi and mpi4py versions
    cdef extern from 'mpi-compat.h': pass
    print('mpi4py ok')
ELSE:
    print('no mpi4py')

n = 8
if n % nb_proc != 0:
    raise ValueError('The number of processes is incorrect.')

if rank == 0:
    data_seq = np.ones([n], dtype=np.int32)
    s_seq = data_seq.sum()
else:
    data_seq = np.zeros([n], dtype=np.int32)

if nb_proc > 1:
    data_local = np.zeros([n // nb_proc], dtype=np.int32)
    comm.Scatter(data_seq, data_local, root=0)
else:
    data_local = data_seq

s = data_local.sum()

if nb_proc > 1:
    s = comm.allreduce(s, op=MPI.SUM)

if rank == 0:
    print('s: {}; s_seq: {}'.format(s, s_seq))
    assert s == s_seq
Build with python setup.py build_ext --inplace and test with python -c "import mod" and mpirun -np 4 python -c "import mod". If mpi4py is not installed, one can still build the module and use it sequentially.
